| author | Ingo Molnar <mingo@kernel.org> | 2024-02-14 12:45:07 +0300 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2024-02-14 12:45:07 +0300 |
| commit | 03c11eb3b16dc0058589751dfd91f254be2be613 (patch) | |
| tree | e5f2889212fec0bb0babdce9abd781ab487e246a /include | |
| parent | de8c6a352131f642b82474abe0cbb5dd26a7e081 (diff) | |
| parent | 841c35169323cd833294798e58b9bf63fa4fa1de (diff) | |
| download | linux-03c11eb3b16dc0058589751dfd91f254be2be613.tar.xz | |
Merge tag 'v6.8-rc4' into x86/percpu, to resolve conflicts and refresh the branch
Conflicts:
arch/x86/include/asm/percpu.h
arch/x86/include/asm/text-patching.h
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
1038 files changed, 35905 insertions, 11973 deletions
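The first hunk below (include/acpi/acpi_bus.h) replaces struct acpi_handle_list's fixed handles[ACPI_MAX_HANDLES] array with a dynamically allocated one and changes acpi_evaluate_reference() to return bool. A minimal consumer sketch under those declarations — the "_DEP" pathname, the acpi_handle_debug() print and the function name are illustrative, not taken from this patch:

#include <linux/acpi.h>

/*
 * Sketch only: evaluate an object returning a package of references and
 * walk the handles. acpi_evaluate_reference() now allocates list->handles
 * itself, so the caller must release it with acpi_handle_list_free().
 */
static void example_walk_refs(acpi_handle handle)
{
	struct acpi_handle_list list = {};
	u32 i;

	if (!acpi_evaluate_reference(handle, "_DEP", NULL, &list))
		return;		/* evaluation failed, nothing was allocated */

	for (i = 0; i < list.count; i++)
		acpi_handle_debug(list.handles[i], "reference %u\n", i);

	acpi_handle_list_free(&list);	/* frees the allocated handles[] */
}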
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 254685085c82..e4d24d3f9abb 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -12,11 +12,9 @@ #include <linux/device.h> #include <linux/property.h> -/* TBD: Make dynamic */ -#define ACPI_MAX_HANDLES 10 struct acpi_handle_list { u32 count; - acpi_handle handles[ACPI_MAX_HANDLES]; + acpi_handle *handles; }; /* acpi_utils.h */ @@ -27,11 +25,15 @@ acpi_status acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, unsigned long long *data); -acpi_status -acpi_evaluate_reference(acpi_handle handle, - acpi_string pathname, - struct acpi_object_list *arguments, - struct acpi_handle_list *list); +bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, + struct acpi_object_list *arguments, + struct acpi_handle_list *list); +bool acpi_handle_list_equal(struct acpi_handle_list *list1, + struct acpi_handle_list *list2); +void acpi_handle_list_replace(struct acpi_handle_list *dst, + struct acpi_handle_list *src); +void acpi_handle_list_free(struct acpi_handle_list *list); +bool acpi_device_dep(acpi_handle target, acpi_handle match); acpi_status acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code, struct acpi_buffer *status_buf); @@ -363,6 +365,98 @@ struct acpi_device_data { struct acpi_gpio_mapping; +#define ACPI_DEVICE_SWNODE_ROOT 0 + +/* + * The maximum expected number of CSI-2 data lanes. + * + * This number is not expected to ever have to be equal to or greater than the + * number of bits in an unsigned long variable, but if it needs to be increased + * above that limit, code will need to be adjusted accordingly. + */ +#define ACPI_DEVICE_CSI2_DATA_LANES 8 + +#define ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH 8 + +enum acpi_device_swnode_dev_props { + ACPI_DEVICE_SWNODE_DEV_ROTATION, + ACPI_DEVICE_SWNODE_DEV_CLOCK_FREQUENCY, + ACPI_DEVICE_SWNODE_DEV_LED_MAX_MICROAMP, + ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_MICROAMP, + ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_TIMEOUT_US, + ACPI_DEVICE_SWNODE_DEV_NUM_OF, + ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES +}; + +enum acpi_device_swnode_port_props { + ACPI_DEVICE_SWNODE_PORT_REG, + ACPI_DEVICE_SWNODE_PORT_NUM_OF, + ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES +}; + +enum acpi_device_swnode_ep_props { + ACPI_DEVICE_SWNODE_EP_REMOTE_EP, + ACPI_DEVICE_SWNODE_EP_BUS_TYPE, + ACPI_DEVICE_SWNODE_EP_REG, + ACPI_DEVICE_SWNODE_EP_CLOCK_LANES, + ACPI_DEVICE_SWNODE_EP_DATA_LANES, + ACPI_DEVICE_SWNODE_EP_LANE_POLARITIES, + /* TX only */ + ACPI_DEVICE_SWNODE_EP_LINK_FREQUENCIES, + ACPI_DEVICE_SWNODE_EP_NUM_OF, + ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES +}; + +/* + * Each device has a root software node plus two times as many nodes as the + * number of CSI-2 ports. + */ +#define ACPI_DEVICE_SWNODE_PORT(port) (2 * (port) + 1) +#define ACPI_DEVICE_SWNODE_EP(endpoint) \ + (ACPI_DEVICE_SWNODE_PORT(endpoint) + 1) + +/** + * struct acpi_device_software_node_port - MIPI DisCo for Imaging CSI-2 port + * @port_name: Port name. + * @data_lanes: "data-lanes" property values. + * @lane_polarities: "lane-polarities" property values. + * @link_frequencies: "link-frequencies" property values. + * @port_nr: Port number. + * @crs_csi2_local: _CRS CSI2 record present (i.e. this is a transmitter one). + * @port_props: Port properties. + * @ep_props: Endpoint properties. + * @remote_ep: Reference to the remote endpoint.
+ */ +struct acpi_device_software_node_port { + char port_name[ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH + 1]; + u32 data_lanes[ACPI_DEVICE_CSI2_DATA_LANES]; + u32 lane_polarities[ACPI_DEVICE_CSI2_DATA_LANES + 1 /* clock lane */]; + u64 link_frequencies[ACPI_DEVICE_CSI2_DATA_LANES]; + unsigned int port_nr; + bool crs_csi2_local; + + struct property_entry port_props[ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES]; + struct property_entry ep_props[ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES]; + + struct software_node_ref_args remote_ep[1]; +}; + +/** + * struct acpi_device_software_nodes - Software nodes for an ACPI device + * @dev_props: Device properties. + * @nodes: Software nodes for root as well as ports and endpoints. + * @nodeptrs: Array of software node pointers, for (un)registering them. + * @ports: Information related to each port and endpoint within a port. + * @num_ports: The number of ports. + */ +struct acpi_device_software_nodes { + struct property_entry dev_props[ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES]; + struct software_node *nodes; + const struct software_node **nodeptrs; + struct acpi_device_software_node_port *ports; + unsigned int num_ports; +}; + /* Device */ struct acpi_device { u32 pld_crc; @@ -381,6 +475,7 @@ struct acpi_device { struct acpi_device_data data; struct acpi_scan_handler *handler; struct acpi_hotplug_context *hp; + struct acpi_device_software_nodes *swnodes; const struct acpi_gpio_mapping *driver_gpios; void *driver_data; struct device dev; @@ -517,7 +612,7 @@ int acpi_bus_attach_private_data(acpi_handle, void *); void acpi_bus_detach_private_data(acpi_handle); int acpi_dev_install_notify_handler(struct acpi_device *adev, u32 handler_type, - acpi_notify_handler handler); + acpi_notify_handler handler, void *context); void acpi_dev_remove_notify_handler(struct acpi_device *adev, u32 handler_type, acpi_notify_handler handler); @@ -539,6 +634,7 @@ int acpi_device_set_power(struct acpi_device *device, int state); int acpi_bus_init_power(struct acpi_device *device); int acpi_device_fix_up_power(struct acpi_device *device); void acpi_device_fix_up_power_extended(struct acpi_device *adev); +void acpi_device_fix_up_power_children(struct acpi_device *adev); int acpi_bus_update_power(acpi_handle handle, int *state_p); int acpi_device_update_power(struct acpi_device *device, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); @@ -623,6 +719,8 @@ struct acpi_pci_root { /* helper */ +struct iommu_ops; + bool acpi_dma_supported(const struct acpi_device *adev); enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); int acpi_iommu_fwspec_init(struct device *dev, u32 id, @@ -760,9 +858,71 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev) adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set); } -bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2); int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer); +static inline bool acpi_dev_hid_match(struct acpi_device *adev, const char *hid2) +{ + const char *hid1 = acpi_device_hid(adev); + + return hid1 && hid2 && !strcmp(hid1, hid2); +} + +static inline bool acpi_str_uid_match(struct acpi_device *adev, const char *uid2) +{ + const char *uid1 = acpi_device_uid(adev); + + return uid1 && uid2 && !strcmp(uid1, uid2); +} + +static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2) +{ + u64 uid1; + + return !acpi_dev_uid_to_integer(adev, &uid1) && uid1 == uid2; +} + +#define TYPE_ENTRY(type, x) \ + const type: x, \ + type: x + +#define
ACPI_STR_TYPES(match) \ + TYPE_ENTRY(unsigned char *, match), \ + TYPE_ENTRY(signed char *, match), \ + TYPE_ENTRY(char *, match), \ + TYPE_ENTRY(void *, match) + +/** + * acpi_dev_uid_match - Match device by supplied UID + * @adev: ACPI device to match. + * @uid2: Unique ID of the device. + * + * Matches UID in @adev with given @uid2. + * + * Returns: %true if matches, %false otherwise. + */ +#define acpi_dev_uid_match(adev, uid2) \ + _Generic(uid2, \ + /* Treat @uid2 as a string for acpi string types */ \ + ACPI_STR_TYPES(acpi_str_uid_match), \ + /* Treat as an integer otherwise */ \ + default: acpi_int_uid_match)(adev, uid2) + +/** + * acpi_dev_hid_uid_match - Match device by supplied HID and UID + * @adev: ACPI device to match. + * @hid2: Hardware ID of the device. + * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID. + * + * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2 + * will be treated as a match. If user wants to validate @uid2, it should be + * done before calling this function. + * + * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise. + */ +#define acpi_dev_hid_uid_match(adev, hid2, uid2) \ + (acpi_dev_hid_match(adev, hid2) && \ + (!(uid2) || acpi_dev_uid_match(adev, uid2))) + void acpi_dev_clear_dependencies(struct acpi_device *supplier); bool acpi_dev_ready_for_enumeration(const struct acpi_device *device); struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier, diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 8d5572ad48cb..a33375e055ad 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -465,6 +465,9 @@ struct acpi_cdat_sslbe { u16 reserved; }; +#define ACPI_CDAT_SSLBIS_US_PORT 0x0100 +#define ACPI_CDAT_SSLBIS_ANY_PORT 0xffff + /******************************************************************************* * * CEDT - CXL Early Discovery Table diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 3751ae69432f..9775384d61c6 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -1046,6 +1046,8 @@ struct acpi_madt_generic_interrupt { /* ACPI_MADT_ENABLED (1) Processor is usable if set */ #define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */ #define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */ +#define ACPI_MADT_GICC_ONLINE_CAPABLE (1<<3) /* 03: Processor is online capable */ +#define ACPI_MADT_GICC_NON_COHERENT (1<<4) /* 04: GIC redistributor is not coherent */ /* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */ @@ -1090,21 +1092,27 @@ struct acpi_madt_generic_msi_frame { struct acpi_madt_generic_redistributor { struct acpi_subtable_header header; - u16 reserved; /* reserved - must be zero */ + u8 flags; + u8 reserved; /* reserved - must be zero */ u64 base_address; u32 length; }; +#define ACPI_MADT_GICR_NON_COHERENT (1) + /* 15: Generic Translator (ACPI 6.0) */ struct acpi_madt_generic_translator { struct acpi_subtable_header header; - u16 reserved; /* reserved - must be zero */ + u8 flags; + u8 reserved; /* reserved - must be zero */ u32 translation_id; u64 base_address; u32 reserved2; }; +#define ACPI_MADT_ITS_NON_COHERENT (1) + /* 16: Multiprocessor wakeup (ACPI 6.4) */ struct acpi_madt_multiproc_wakeup { diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 6126c977ece0..3a0995f8bce8 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -144,6 +144,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int 
cppc_set_enable(int cpu, bool enable); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); extern bool cppc_perf_ctrs_in_pcc(void); +extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf); +extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq); extern bool acpi_cpc_valid(void); extern bool cppc_allow_fast_switch(void); extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data); diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 3c8bba9f1114..be1dd4c1a917 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -73,8 +73,12 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb); void ghes_unregister_vendor_record_notifier(struct notifier_block *nb); struct list_head *ghes_get_devices(void); + +void ghes_estatus_pool_region_free(unsigned long addr, u32 size); #else static inline struct list_head *ghes_get_devices(void) { return NULL; } + +static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; } #endif int ghes_estatus_pool_init(unsigned int num_ghes); diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h index 73e806fe7ce7..9b373d172a77 100644 --- a/include/acpi/pcc.h +++ b/include/acpi/pcc.h @@ -18,7 +18,20 @@ struct pcc_mbox_chan { u16 min_turnaround_time; }; +/* Generic Communications Channel Shared Memory Region */ +#define PCC_SIGNATURE 0x50434300 +/* Generic Communications Channel Command Field */ +#define PCC_CMD_GENERATE_DB_INTR BIT(15) +/* Generic Communications Channel Status Field */ +#define PCC_STATUS_CMD_COMPLETE BIT(0) +#define PCC_STATUS_SCI_DOORBELL BIT(1) +#define PCC_STATUS_ERROR BIT(2) +#define PCC_STATUS_PLATFORM_NOTIFY BIT(3) +/* Initiator Responder Communications Channel Flags */ +#define PCC_CMD_COMPLETION_NOTIFY BIT(0) + #define MAX_PCC_SUBSPACES 256 + #ifdef CONFIG_PCC extern struct pcc_mbox_chan * pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id); diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 94181fe9780a..3f34ebb27525 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -465,9 +465,4 @@ extern int acpi_processor_ffh_lpi_probe(unsigned int cpu); extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi); #endif -#ifdef CONFIG_ACPI_HOTPLUG_CPU -extern int arch_register_cpu(int cpu); -extern void arch_unregister_cpu(int cpu); -#endif - #endif diff --git a/include/acpi/video.h b/include/acpi/video.h index 4230392b5b0b..3d538d4178ab 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -75,6 +75,15 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void) return __acpi_video_get_backlight_type(false, NULL); } +/* + * This function MUST only be called by GPU drivers to check if the driver + * should register a backlight class device. This function not only checks + * if a GPU native backlight device should be registered it *also* tells + * the ACPI video-detect code that native GPU backlight control is available. + * Therefor calling this from any place other then the GPU driver is wrong! + * To check if GPU native backlight control is used in other places instead use: + * if (acpi_video_get_backlight_type() == acpi_backlight_native) { ... 
} + */ static inline bool acpi_video_backlight_use_native(void) { return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native; diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 941be574bbe0..d436bee4d129 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -2,7 +2,7 @@ # # asm headers that all architectures except um should have # (This file is not included when SRCARCH=um since UML borrows several -# asm headers from the host architecutre.) +# asm headers from the host architecture.) mandatory-y += atomic.h mandatory-y += archrandom.h @@ -11,6 +11,7 @@ mandatory-y += bitops.h mandatory-y += bug.h mandatory-y += bugs.h mandatory-y += cacheflush.h +mandatory-y += cfi.h mandatory-y += checksum.h mandatory-y += compat.h mandatory-y += current.h diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h index eb64bd4f11f3..542d3727ee4e 100644 --- a/include/asm-generic/bitops/instrumented-lock.h +++ b/include/asm-generic/bitops/instrumented-lock.h @@ -58,27 +58,25 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) return arch_test_and_set_bit_lock(nr, addr); } -#if defined(arch_clear_bit_unlock_is_negative_byte) /** - * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. - * @nr: the bit to clear - * @addr: the address to start counting from + * xor_unlock_is_negative_byte - XOR a single byte in memory and test if + * it is negative, for unlock. + * @mask: Change the bits which are set in this mask. + * @addr: The address of the word containing the byte to change. * + * Changes some of bits 0-6 in the word pointed to by @addr. * This operation is atomic and provides release barrier semantics. + * Used to optimise some folio operations which are commonly paired + * with an unlock or end of writeback. Bit 7 is used as PG_waiters to + * indicate whether anybody is waiting for the unlock. * - * This is a bit of a one-trick-pony for the filemap code, which clears - * PG_locked and tests PG_waiters, + * Return: Whether the top bit of the byte is set. */ -static inline bool -clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +static inline bool xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *addr) { kcsan_release(); - instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); - return arch_clear_bit_unlock_is_negative_byte(nr, addr); + instrument_atomic_write(addr, sizeof(long)); + return arch_xor_unlock_is_negative_byte(mask, addr); } -/* Let everybody know we have it. */ -#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte -#endif - #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 40913516e654..14d4ec8c5152 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -66,27 +66,15 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p) raw_atomic_long_set_release((atomic_long_t *)p, old); } -/** - * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. 
- * @nr: the bit to clear - * @addr: the address to start counting from - * - * This is a bit of a one-trick-pony for the filemap code, which clears - * PG_locked and tests PG_waiters, - */ -#ifndef arch_clear_bit_unlock_is_negative_byte -static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr, - volatile unsigned long *p) +#ifndef arch_xor_unlock_is_negative_byte +static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *p) { long old; - unsigned long mask = BIT_MASK(nr); - p += BIT_WORD(nr); - old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); + old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p); return !!(old & BIT(7)); } -#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte #endif #include <asm-generic/bitops/instrumented-lock.h> diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index 84ec53ccc450..7ee8a179d103 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -91,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end) } #endif +#ifndef flush_cache_vmap_early +static inline void flush_cache_vmap_early(unsigned long start, unsigned long end) +{ +} +#endif + #ifndef flush_cache_vunmap static inline void flush_cache_vunmap(unsigned long start, unsigned long end) { diff --git a/include/asm-generic/cfi.h b/include/asm-generic/cfi.h new file mode 100644 index 000000000000..41fac3537bf9 --- /dev/null +++ b/include/asm-generic/cfi.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_CFI_H +#define __ASM_GENERIC_CFI_H + +#endif /* __ASM_GENERIC_CFI_H */ diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index 43e18db89c14..ad928cce268b 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -2,6 +2,8 @@ #ifndef __ASM_GENERIC_CHECKSUM_H #define __ASM_GENERIC_CHECKSUM_H +#include <linux/bitops.h> + /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) @@ -31,9 +33,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); static inline __sum16 csum_fold(__wsum csum) { u32 sum = (__force u32)csum; - sum = (sum & 0xffff) + (sum >> 16); - sum = (sum & 0xffff) + (sum >> 16); - return (__force __sum16)~sum; + return (__force __sum16)((~sum - ror32(sum, 16)) >> 16); } #endif diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h index 3df9f59a544e..f27d66fdc00a 100644 --- a/include/asm-generic/cmpxchg-local.h +++ b/include/asm-generic/cmpxchg-local.h @@ -34,7 +34,7 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr, *(u16 *)ptr = (new & 0xffffu); break; case 4: prev = *(u32 *)ptr; - if (prev == (old & 0xffffffffffu)) + if (prev == (old & 0xffffffffu)) *(u32 *)ptr = (new & 0xffffffffu); break; case 8: prev = *(u64 *)ptr; diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h index bb7ee9c70e60..6ccabb400aa6 100644 --- a/include/asm-generic/fb.h +++ b/include/asm-generic/fb.h @@ -12,14 +12,14 @@ #include <linux/pgtable.h> struct fb_info; -struct file; -#ifndef fb_pgprotect -#define fb_pgprotect fb_pgprotect -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) +#ifndef pgprot_framebuffer +#define pgprot_framebuffer pgprot_framebuffer +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + 
unsigned long offset) { - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + return pgprot_writecombine(prot); } #endif diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 4da02798a00b..6dcf4d576970 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -76,7 +76,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, #ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) + pte_t *ptep, pte_t pte, unsigned long sz) { set_pte_at(mm, addr, ptep, pte); } diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h deleted file mode 100644 index 81dfa3ee5e06..000000000000 --- a/include/asm-generic/ide_iops.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Generic I/O and MEMIO string operations. */ - -#define __ide_insw insw -#define __ide_insl insl -#define __ide_outsw outsw -#define __ide_outsl outsl - -static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count) -{ - while (count--) { - *(u16 *)addr = readw(port); - addr += 2; - } -} - -static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count) -{ - while (count--) { - *(u32 *)addr = readl(port); - addr += 4; - } -} - -static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) -{ - while (count--) { - writew(*(u16 *)addr, port); - addr += 2; - } -} - -static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) -{ - while (count--) { - writel(*(u32 *)addr, port); - addr += 4; - } -} diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index cecd2b7bd033..430f0ae0dde2 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -36,6 +36,7 @@ struct ms_hyperv_info { u32 nested_features; u32 max_vp_index; u32 max_lp_index; + u8 vtl; union { u32 isolation_config_a; struct { @@ -54,7 +55,6 @@ struct ms_hyperv_info { }; }; u64 shared_gpa_boundary; - u8 vtl; }; extern struct ms_hyperv_info ms_hyperv; extern bool hv_nested; diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h index 1a3ad6d29833..c32e0cf23c90 100644 --- a/include/asm-generic/numa.h +++ b/include/asm-generic/numa.h @@ -35,6 +35,7 @@ int __init numa_add_memblk(int nodeid, u64 start, u64 end); void __init numa_set_distance(int from, int to, int distance); void __init numa_free_distance(void); void __init early_map_cpu_to_node(unsigned int cpu, int nid); +int __init early_cpu_to_node(int cpu); void numa_store_cpu_info(unsigned int cpu); void numa_add_cpu(unsigned int cpu); void numa_remove_cpu(unsigned int cpu); @@ -46,6 +47,7 @@ static inline void numa_add_cpu(unsigned int cpu) { } static inline void numa_remove_cpu(unsigned int cpu) { } static inline void arch_numa_init(void) { } static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } +static inline int early_cpu_to_node(int cpu) { return 0; } #endif /* CONFIG_NUMA */ diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index c75d4a753849..879e5f8aa5e9 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -169,6 +169,8 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) ptdesc = pagetable_alloc(gfp, 0); if (!ptdesc) return NULL; + + pagetable_pud_ctor(ptdesc); return ptdesc_address(ptdesc); } @@ -190,8 +192,11 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long 
addr) static inline void __pud_free(struct mm_struct *mm, pud_t *pud) { + struct ptdesc *ptdesc = virt_to_ptdesc(pud); + BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - pagetable_free(virt_to_ptdesc(pud)); + pagetable_pud_dtor(ptdesc); + pagetable_free(ptdesc); } #ifndef __HAVE_ARCH_PUD_FREE diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 995513fa2690..0655aa5b57b2 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -70,7 +70,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock) */ static __always_inline int queued_spin_value_unlocked(struct qspinlock lock) { - return !atomic_read(&lock.val); + return !lock.val.counter; } /** diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index fdfebcb050f4..90803a826ba0 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -68,11 +68,18 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) smp_store_release(ptr, (u16)val + 1); } +static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + u32 val = lock.counter; + + return ((val >> 16) == (val & 0xffff)); +} + static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) { - u32 val = atomic_read(lock); + arch_spinlock_t val = READ_ONCE(*lock); - return ((val >> 16) != (val & 0xffff)); + return !arch_spin_value_unlocked(val); } static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) @@ -82,11 +89,6 @@ static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) return (s16)((val >> 16) - (val & 0xffff)) > 1; } -static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - return !arch_spin_is_locked(&lock); -} - #include <asm/qrwlock.h> #endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h index 699650f81970..a84c64e5f11e 100644 --- a/include/asm-generic/unaligned.h +++ b/include/asm-generic/unaligned.h @@ -104,9 +104,9 @@ static inline u32 get_unaligned_le24(const void *p) static inline void __put_unaligned_be24(const u32 val, u8 *p) { - *p++ = val >> 16; - *p++ = val >> 8; - *p++ = val; + *p++ = (val >> 16) & 0xff; + *p++ = (val >> 8) & 0xff; + *p++ = val & 0xff; } static inline void put_unaligned_be24(const u32 val, void *p) @@ -116,9 +116,9 @@ static inline void put_unaligned_be24(const u32 val, void *p) static inline void __put_unaligned_le24(const u32 val, u8 *p) { - *p++ = val; - *p++ = val >> 8; - *p++ = val >> 16; + *p++ = val & 0xff; + *p++ = (val >> 8) & 0xff; + *p++ = (val >> 16) & 0xff; } static inline void put_unaligned_le24(const u32 val, void *p) @@ -128,12 +128,12 @@ static inline void put_unaligned_le24(const u32 val, void *p) static inline void __put_unaligned_be48(const u64 val, u8 *p) { - *p++ = val >> 40; - *p++ = val >> 32; - *p++ = val >> 24; - *p++ = val >> 16; - *p++ = val >> 8; - *p++ = val; + *p++ = (val >> 40) & 0xff; + *p++ = (val >> 32) & 0xff; + *p++ = (val >> 24) & 0xff; + *p++ = (val >> 16) & 0xff; + *p++ = (val >> 8) & 0xff; + *p++ = val & 0xff; } static inline void put_unaligned_be48(const u64 val, void *p) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 9c59409104f6..5dd3a61d673d 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -138,13 +138,6 @@ * are handled as text/data or they can be discarded (which * often happens at runtime) */ -#ifdef CONFIG_HOTPLUG_CPU -#define CPU_KEEP(sec) 
*(.cpu##sec) -#define CPU_DISCARD(sec) -#else -#define CPU_KEEP(sec) -#define CPU_DISCARD(sec) *(.cpu##sec) -#endif #if defined(CONFIG_MEMORY_HOTPLUG) #define MEM_KEEP(sec) *(.mem##sec) @@ -363,7 +356,6 @@ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data*) \ - MEM_KEEP(exit.data*) \ *(.data.unlikely) \ __start_once = .; \ *(.data.once) \ @@ -378,7 +370,8 @@ BRANCH_PROFILE() \ TRACE_PRINTKS() \ BPF_RAW_TP() \ - TRACEPOINT_STR() + TRACEPOINT_STR() \ + KUNIT_TABLE() /* * Data section helpers @@ -528,7 +521,6 @@ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ *(.ref.rodata) \ MEM_KEEP(init.rodata) \ - MEM_KEEP(exit.rodata) \ } \ \ /* Built-in module parameters. */ \ @@ -581,7 +573,6 @@ *(.ref.text) \ *(.text.asan.* .text.tsan.*) \ MEM_KEEP(init.text*) \ - MEM_KEEP(exit.text*) \ /* sched.text is aling to function alignment to secure we have same @@ -710,7 +701,7 @@ EARLYCON_TABLE() \ LSM_TABLE() \ EARLY_LSM_TABLE() \ - KUNIT_TABLE() + KUNIT_INIT_TABLE() #define INIT_TEXT \ *(.init.text .init.text.*) \ @@ -721,13 +712,10 @@ *(.exit.data .exit.data.*) \ *(.fini_array .fini_array.*) \ *(.dtors .dtors.*) \ - MEM_DISCARD(exit.data*) \ - MEM_DISCARD(exit.rodata*) #define EXIT_TEXT \ *(.exit.text) \ *(.text.exit) \ - MEM_DISCARD(exit.text) #define EXIT_CALL \ *(.exitcall.exit) @@ -939,6 +927,12 @@ . = ALIGN(8); \ BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end) +/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ +#define KUNIT_INIT_TABLE() \ + . = ALIGN(8); \ + BOUNDED_SECTION_POST_LABEL(.kunit_init_test_suites, \ + __kunit_init_suites, _start, _end) + #ifdef CONFIG_BLK_DEV_INITRD #define INIT_RAM_FS \ . = ALIGN(4); \ diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 35e45b854a6f..51382befbe37 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -217,6 +217,18 @@ static inline void crypto_free_aead(struct crypto_aead *tfm) crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); } +/** + * crypto_has_aead() - Search for the availability of an aead. 
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * aead + * @type: specifies the type of the aead + * @mask: specifies the mask for the aead + * + * Return: true when the aead is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_aead(const char *alg_name, u32 type, u32 mask); + static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm) { return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 670508f1dca1..31c111bebb68 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -382,7 +382,7 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req) * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: zero on success; error code in case of error @@ -400,7 +400,7 @@ int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm, * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: Output length on success; error code in case of error diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index ca86f4c6ba43..7a4a71af653f 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -195,11 +195,6 @@ static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm, return PTR_ALIGN(crypto_tfm_ctx(tfm), align); } -static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) -{ - return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1); -} - static inline unsigned int crypto_dma_align(void) { return CRYPTO_DMA_ALIGN; diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 2835069c5997..545dbefe3e13 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -78,7 +78,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool retry_support, int (*cbk_do_batch)(struct crypto_engine *engine), bool rt, int qlen); -int crypto_engine_exit(struct crypto_engine *engine); +void crypto_engine_exit(struct crypto_engine *engine); int crypto_engine_register_aead(struct aead_engine_alg *alg); void crypto_engine_unregister_aead(struct aead_engine_alg *alg); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index f7c2a22cd776..5d61f576cfc8 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -212,13 +212,9 @@ struct shash_desc { * This is a counterpart to @init_tfm, used to remove * various changes set in @init_tfm. * @clone_tfm: Copy transform into new object, may allocate memory. - * @digestsize: see struct ahash_alg - * @statesize: see struct ahash_alg * @descsize: Size of the operational state for the message digest. This state * size is the memory size that needs to be allocated for * shash_desc.__ctx - * @stat: Statistics for hash algorithm. 
- * @base: internally used * @halg: see struct hash_alg_common * @HASH_ALG_COMMON: see struct hash_alg_common */ @@ -250,16 +246,7 @@ struct shash_alg { #undef HASH_ALG_COMMON_STAT struct crypto_ahash { - int (*init)(struct ahash_request *req); - int (*update)(struct ahash_request *req); - int (*final)(struct ahash_request *req); - int (*finup)(struct ahash_request *req); - int (*digest)(struct ahash_request *req); - int (*export)(struct ahash_request *req, void *out); - int (*import)(struct ahash_request *req, const void *in); - int (*setkey)(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen); - + bool using_shash; /* Underlying algorithm is shash, not ahash */ unsigned int statesize; unsigned int reqsize; struct crypto_tfm base; @@ -342,12 +329,6 @@ static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); } -static inline unsigned int crypto_ahash_alignmask( - struct crypto_ahash *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); -} - /** * crypto_ahash_blocksize() - obtain block size for cipher * @tfm: cipher handle @@ -519,10 +500,7 @@ int crypto_ahash_digest(struct ahash_request *req); * * Return: 0 if the export was successful; < 0 if an error occurred */ -static inline int crypto_ahash_export(struct ahash_request *req, void *out) -{ - return crypto_ahash_reqtfm(req)->export(req, out); -} +int crypto_ahash_export(struct ahash_request *req, void *out); /** * crypto_ahash_import() - import message digest state @@ -535,15 +513,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) * * Return: 0 if the import was successful; < 0 if an error occurred */ -static inline int crypto_ahash_import(struct ahash_request *req, const void *in) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->import(req, in); -} +int crypto_ahash_import(struct ahash_request *req, const void *in); /** * crypto_ahash_init() - (re)initialize message digest handle @@ -556,36 +526,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) * * Return: see crypto_ahash_final() */ -static inline int crypto_ahash_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->init(req); -} - -static inline struct crypto_istat_hash *hash_get_stat( - struct hash_alg_common *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err) -{ - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&hash_get_stat(alg)->err_cnt); - - return err; -} +int crypto_ahash_init(struct ahash_request *req); /** * crypto_ahash_update() - add data to message digest for processing @@ -598,16 +539,7 @@ static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err) * * Return: see crypto_ahash_final() */ -static inline int crypto_ahash_update(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_add(req->nbytes, &hash_get_stat(alg)->hash_tlen); - - return crypto_hash_errstat(alg, tfm->update(req)); -} +int crypto_ahash_update(struct ahash_request 
*req); /** * DOC: Asynchronous Hash Request Handle @@ -798,12 +730,6 @@ static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm) return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); } -static inline unsigned int crypto_shash_alignmask( - struct crypto_shash *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); -} - /** * crypto_shash_blocksize() - obtain block size for cipher * @tfm: cipher handle @@ -952,10 +878,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, * Context: Any context. * Return: 0 if the export creation was successful; < 0 if an error occurred */ -static inline int crypto_shash_export(struct shash_desc *desc, void *out) -{ - return crypto_shash_alg(desc->tfm)->export(desc, out); -} +int crypto_shash_export(struct shash_desc *desc, void *out); /** * crypto_shash_import() - import operational state @@ -969,15 +892,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) * Context: Any context. * Return: 0 if the import was successful; < 0 if an error occurred */ -static inline int crypto_shash_import(struct shash_desc *desc, const void *in) -{ - struct crypto_shash *tfm = desc->tfm; - - if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return crypto_shash_alg(tfm)->import(desc, in); -} +int crypto_shash_import(struct shash_desc *desc, const void *in); /** * crypto_shash_init() - (re)initialize message digest diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h index dd4f06785049..d6927739f8b2 100644 --- a/include/crypto/hash_info.h +++ b/include/crypto/hash_info.h @@ -10,6 +10,7 @@ #include <crypto/sha1.h> #include <crypto/sha2.h> +#include <crypto/sha3.h> #include <crypto/md5.h> #include <crypto/streebog.h> diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index ef8ce86b1f78..78ecaf5db04c 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -121,6 +121,7 @@ struct af_alg_async_req { * * @tsgl_list: Link to TX SGL * @iv: IV for cipher operation + * @state: Existing state for continuing operation * @aead_assoclen: Length of AAD for AEAD cipher operations * @completion: Work queue for synchronous operation * @used: TX bytes sent to kernel. This variable is used to @@ -136,11 +137,13 @@ struct af_alg_async_req { * recvmsg is invoked. * @init: True if metadata has been sent. * @len: Length of memory allocated for this data structure. + * @inflight: Non-zero when AIO requests are in flight. 
*/ struct af_alg_ctx { struct list_head tsgl_list; void *iv; + void *state; size_t aead_assoclen; struct crypto_wait wait; @@ -154,6 +157,8 @@ struct af_alg_ctx { bool init; unsigned int len; + + unsigned int inflight; }; int af_alg_register_type(const struct af_alg_type *type); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index cf65676e45f4..59c707e4dea4 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -18,15 +18,13 @@ struct crypto_hash_walk { char *data; unsigned int offset; - unsigned int alignmask; + unsigned int flags; struct page *pg; unsigned int entrylen; unsigned int total; struct scatterlist *sg; - - unsigned int flags; }; struct ahash_instance { @@ -269,11 +267,6 @@ static inline struct crypto_shash *crypto_spawn_shash( return crypto_spawn_tfm2(&spawn->base); } -static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) -{ - return crypto_tfm_ctx_aligned(&tfm->base); -} - static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_shash, base); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index fb3d9e899f52..7ae42afdcf3e 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -36,10 +36,25 @@ struct skcipher_instance { }; }; +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *inst); + union { + struct { + char head[offsetof(struct lskcipher_alg, co.base)]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + struct crypto_skcipher_spawn { struct crypto_spawn base; }; +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + struct skcipher_walk { union { struct { @@ -80,6 +95,12 @@ static inline struct crypto_instance *skcipher_crypto_instance( return &inst->s.base; } +static inline struct crypto_instance *lskcipher_crypto_instance( + struct lskcipher_instance *inst) +{ + return &inst->s.base; +} + static inline struct skcipher_instance *skcipher_alg_instance( struct crypto_skcipher *skcipher) { @@ -87,11 +108,23 @@ static inline struct skcipher_instance *skcipher_alg_instance( struct skcipher_instance, alg); } +static inline struct lskcipher_instance *lskcipher_alg_instance( + struct crypto_lskcipher *lskcipher) +{ + return container_of(crypto_lskcipher_alg(lskcipher), + struct lskcipher_instance, alg); +} + static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) { return crypto_instance_ctx(skcipher_crypto_instance(inst)); } +static inline void *lskcipher_instance_ctx(struct lskcipher_instance *inst) +{ + return crypto_instance_ctx(lskcipher_crypto_instance(inst)); +} + static inline void skcipher_request_complete(struct skcipher_request *req, int err) { crypto_request_complete(&req->base, err); @@ -101,21 +134,36 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) { crypto_drop_spawn(&spawn->base); } -static inline struct skcipher_alg *crypto_skcipher_spawn_alg( - struct crypto_skcipher_spawn *spawn) +static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg( + struct 
crypto_lskcipher_spawn *spawn) { - return container_of(spawn->base.alg, struct skcipher_alg, base); + return container_of(spawn->base.alg, struct lskcipher_alg, co.base); } -static inline struct skcipher_alg *crypto_spawn_skcipher_alg( +static inline struct skcipher_alg_common *crypto_spawn_skcipher_alg_common( struct crypto_skcipher_spawn *spawn) { - return crypto_skcipher_spawn_alg(spawn); + return container_of(spawn->base.alg, struct skcipher_alg_common, base); +} + +static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_lskcipher_spawn_alg(spawn); } static inline struct crypto_skcipher *crypto_spawn_skcipher( @@ -124,6 +172,12 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher( return crypto_spawn_tfm2(&spawn->base); } +static inline struct crypto_lskcipher *crypto_spawn_lskcipher( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + static inline void crypto_skcipher_set_reqsize( struct crypto_skcipher *skcipher, unsigned int reqsize) { @@ -144,6 +198,13 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst); +int crypto_register_lskcipher(struct lskcipher_alg *alg); +void crypto_unregister_lskcipher(struct lskcipher_alg *alg); +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count); +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst); + int skcipher_walk_done(struct skcipher_walk *walk, int err); int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, @@ -166,6 +227,11 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm) { return crypto_tfm_ctx_dma(&tfm->base); @@ -191,41 +257,6 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req) return req->base.flags; } -static inline unsigned int crypto_skcipher_alg_min_keysize( - struct skcipher_alg *alg) -{ - return alg->min_keysize; -} - -static inline unsigned int crypto_skcipher_alg_max_keysize( - struct skcipher_alg *alg) -{ - return alg->max_keysize; -} - -static inline unsigned int crypto_skcipher_alg_walksize( - struct skcipher_alg *alg) -{ - return alg->walksize; -} - -/** - * crypto_skcipher_walksize() - obtain walk size - * @tfm: cipher handle - * - * In some cases, algorithms can only perform optimally when operating on - * multiple blocks in parallel. 
This is reflected by the walksize, which - * must be a multiple of the chunksize (or equal if the concern does not - * apply) - * - * Return: walk size in bytes - */ -static inline unsigned int crypto_skcipher_walksize( - struct crypto_skcipher *tfm) -{ - return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); -} - /* Helpers for simple block cipher modes of operation */ struct skcipher_ctx_simple { struct crypto_cipher *cipher; /* underlying block cipher */ @@ -249,5 +280,24 @@ static inline struct crypto_alg *skcipher_ialg_simple( return crypto_spawn_cipher_alg(spawn); } +static inline struct crypto_lskcipher *lskcipher_cipher_simple( + struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + return *ctx; +} + +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb); + +static inline struct lskcipher_alg *lskcipher_ialg_simple( + struct lskcipher_instance *inst) +{ + struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst); + + return crypto_lskcipher_spawn_alg(spawn); +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/sig.h b/include/crypto/sig.h index 641b4714c448..d25186bb2be3 100644 --- a/include/crypto/sig.h +++ b/include/crypto/sig.h @@ -79,7 +79,7 @@ int crypto_sig_maxsize(struct crypto_sig *tfm); * @tfm: signature tfm handle allocated with crypto_alloc_sig() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: zero on success; error code in case of error diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 080d1ba3611d..c8857d7bdb37 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -15,6 +15,17 @@ #include <linux/string.h> #include <linux/types.h> +/* Set this bit if the lskcipher operation is a continuation. */ +#define CRYPTO_LSKCIPHER_FLAG_CONT 0x00000001 +/* Set this bit if the lskcipher operation is final. */ +#define CRYPTO_LSKCIPHER_FLAG_FINAL 0x00000002 +/* The bit CRYPTO_TFM_REQ_MAY_SLEEP can also be set if needed. */ + +/* Set this bit if the skcipher operation is a continuation. */ +#define CRYPTO_SKCIPHER_REQ_CONT 0x00000001 +/* Set this bit if the skcipher operation is not final. */ +#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002 + struct scatterlist; /** @@ -49,6 +60,10 @@ struct crypto_sync_skcipher { struct crypto_skcipher base; }; +struct crypto_lskcipher { + struct crypto_tfm base; +}; + /* * struct crypto_istat_cipher - statistics for cipher algorithm * @encrypt_cnt: number of encrypt requests @@ -65,8 +80,14 @@ struct crypto_istat_cipher { atomic64_t err_cnt; }; -/** - * struct skcipher_alg - symmetric key cipher definition +#ifdef CONFIG_CRYPTO_STATS +#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat; +#else +#define SKCIPHER_ALG_COMMON_STAT +#endif + +/* + * struct skcipher_alg_common - common properties of skcipher_alg * @min_keysize: Minimum key size supported by the transformation. This is the * smallest key length supported by this transformation algorithm. * This must be set to one of the pre-defined values as this is @@ -77,6 +98,29 @@ struct crypto_istat_cipher { * This must be set to one of the pre-defined values as this is * not hardware specific. Possible values for this field can be * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @ivsize: IV size applicable for transformation. 
The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @statesize: Size of the internal state for the algorithm. + * @stat: Statistics for cipher algorithm + * @base: Definition of a generic crypto algorithm. + */ +#define SKCIPHER_ALG_COMMON { \ + unsigned int min_keysize; \ + unsigned int max_keysize; \ + unsigned int ivsize; \ + unsigned int chunksize; \ + unsigned int statesize; \ + \ + SKCIPHER_ALG_COMMON_STAT \ + \ + struct crypto_alg base; \ +} +struct skcipher_alg_common SKCIPHER_ALG_COMMON; + +/** + * struct skcipher_alg - symmetric key cipher definition * @setkey: Set key for the transformation. This function is used to either * program a supplied key into the hardware or store the key in the * transformation context for programming it later. Note that this @@ -100,6 +144,17 @@ struct crypto_istat_cipher { * be called in parallel with the same transformation object. * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt * and the conditions are exactly the same. + * @export: Export partial state of the transformation. This function dumps the + * entire state of the ongoing transformation into a provided block of + * data so it can be @import 'ed back later on. This is useful in case + * you want to save partial result of the transformation after + * processing certain amount of data and reload this partial result + * multiple times later on for multiple re-use. No data processing + * happens at this point. + * @import: Import partial state of the transformation. This function loads the + * entire state of the ongoing transformation from a provided block of + * data so the transformation can continue from this point onward. No + * data processing happens at this point. * @init: Initialize the cryptographic transformation object. This function * is used to initialize the cryptographic transformation object. * This function is called only once at the instantiation time, right @@ -111,15 +166,10 @@ struct crypto_istat_cipher { * @exit: Deinitialize the cryptographic transformation object. This is a * counterpart to @init, used to remove various changes set in * @init. - * @ivsize: IV size applicable for transformation. The consumer must provide an - * IV of exactly that size to perform the encrypt or decrypt operation. - * @chunksize: Equal to the block size except for stream ciphers such as - * CTR where it is set to the underlying block size. * @walksize: Equal to the chunk size except in cases where the algorithm is * considerably more efficient if it can operate on multiple chunks * in parallel. Should be a multiple of chunksize. - * @stat: Statistics for cipher algorithm - * @base: Definition of a generic crypto algorithm. + * @co: see struct skcipher_alg_common * * All fields except @ivsize are mandatory and must be filled. 
*/ @@ -128,20 +178,63 @@ struct skcipher_alg { unsigned int keylen); int (*encrypt)(struct skcipher_request *req); int (*decrypt)(struct skcipher_request *req); + int (*export)(struct skcipher_request *req, void *out); + int (*import)(struct skcipher_request *req, const void *in); int (*init)(struct crypto_skcipher *tfm); void (*exit)(struct crypto_skcipher *tfm); - unsigned int min_keysize; - unsigned int max_keysize; - unsigned int ivsize; - unsigned int chunksize; unsigned int walksize; -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_cipher stat; -#endif + union { + struct SKCIPHER_ALG_COMMON; + struct skcipher_alg_common co; + }; +}; - struct crypto_alg base; +/** + * struct lskcipher_alg - linear symmetric key cipher definition + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a number of bytes. This function is used to encrypt + * the supplied data. This function shall not modify + * the transformation context, as this function may be called + * in parallel with the same transformation object. Data + * may be left over if length is not a multiple of blocks + * and there is more to come (final == false). The number of + * left-over bytes should be returned in case of success. + * The siv field shall be as long as ivsize + statesize with + * the IV placed at the front. The state will be used by the + * algorithm internally. + * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to + * @encrypt and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. 
+ * @co: see struct skcipher_alg_common + */ +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *siv, u32 flags); + int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *siv, u32 flags); + int (*init)(struct crypto_lskcipher *tfm); + void (*exit)(struct crypto_lskcipher *tfm); + + struct skcipher_alg_common co; }; #define MAX_SYNC_SKCIPHER_REQSIZE 384 @@ -213,12 +306,36 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name, u32 type, u32 mask); + +/** + * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * lskcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an lskcipher. The returned struct + * crypto_lskcipher is the cipher handle that is required for any subsequent + * API invocation for that lskcipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask); + static inline struct crypto_tfm *crypto_skcipher_tfm( struct crypto_skcipher *tfm) { return &tfm->base; } +static inline struct crypto_tfm *crypto_lskcipher_tfm( + struct crypto_lskcipher *tfm) +{ + return &tfm->base; +} + /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed @@ -236,6 +353,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) } /** + * crypto_free_lskcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm)); +} + +/** * crypto_has_skcipher() - Search for the availability of an skcipher. 
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the * skcipher @@ -253,6 +381,19 @@ static inline const char *crypto_skcipher_driver_name( return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); } +static inline const char *crypto_lskcipher_driver_name( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm)); +} + +static inline struct skcipher_alg_common *crypto_skcipher_alg_common( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg_common, base); +} + static inline struct skcipher_alg *crypto_skcipher_alg( struct crypto_skcipher *tfm) { @@ -260,9 +401,11 @@ static inline struct skcipher_alg *crypto_skcipher_alg( struct skcipher_alg, base); } -static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +static inline struct lskcipher_alg *crypto_lskcipher_alg( + struct crypto_lskcipher *tfm) { - return alg->ivsize; + return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg, + struct lskcipher_alg, co.base); } /** @@ -276,7 +419,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) */ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->ivsize; + return crypto_skcipher_alg_common(tfm)->ivsize; } static inline unsigned int crypto_sync_skcipher_ivsize( @@ -286,6 +429,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize( } /** + * crypto_lskcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the lskcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_lskcipher_ivsize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.ivsize; +} + +/** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle * @@ -301,10 +459,20 @@ static inline unsigned int crypto_skcipher_blocksize( return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); } -static inline unsigned int crypto_skcipher_alg_chunksize( - struct skcipher_alg *alg) +/** + * crypto_lskcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the lskcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_lskcipher_blocksize( + struct crypto_lskcipher *tfm) { - return alg->chunksize; + return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm)); } /** @@ -321,7 +489,58 @@ static inline unsigned int crypto_skcipher_alg_chunksize( static inline unsigned int crypto_skcipher_chunksize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); + return crypto_skcipher_alg_common(tfm)->chunksize; +} + +/** + * crypto_lskcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
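For a stream-like mode the distinction plays out as follows; a minimal sketch, assuming tfm is a handle to a hypothetical CTR-mode AES lskcipher (names and values illustrative, not from the patch):

	unsigned int bs = crypto_lskcipher_blocksize(tfm);	/* 1 for CTR */
	unsigned int cs = crypto_lskcipher_chunksize(tfm);	/* 16, the AES block size */

	/* Incremental updates must be multiples of cs, not bs, because the
	 * IV/counter carried in siv has no sub-block granularity. */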
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_lskcipher_chunksize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.chunksize; +} + +/** + * crypto_skcipher_statesize() - obtain state size + * @tfm: cipher handle + * + * Some algorithms cannot be chained with the IV alone. They carry + * internal state which must be replicated if data is to be processed + * incrementally. The size of that state can be obtained with this + * function. + * + * Return: state size in bytes + */ +static inline unsigned int crypto_skcipher_statesize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_common(tfm)->statesize; +} + +/** + * crypto_lskcipher_statesize() - obtain state size + * @tfm: cipher handle + * + * Some algorithms cannot be chained with the IV alone. They carry + * internal state which must be replicated if data is to be processed + * incrementally. The size of that state can be obtained with this + * function. + * + * Return: state size in bytes + */ +static inline unsigned int crypto_lskcipher_statesize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.statesize; } static inline unsigned int crypto_sync_skcipher_blocksize( @@ -336,6 +555,12 @@ static inline unsigned int crypto_skcipher_alignmask( return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)); } +static inline unsigned int crypto_lskcipher_alignmask( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm)); +} + static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm) { return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm)); @@ -371,6 +596,23 @@ static inline void crypto_sync_skcipher_clear_flags( crypto_skcipher_clear_flags(&tfm->base, flags); } +static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm)); +} + +static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags); +} + +static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags); +} + /** * crypto_skcipher_setkey() - set key for cipher * @tfm: cipher handle @@ -396,16 +638,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, return crypto_skcipher_setkey(&tfm->base, key, keylen); } +/** + * crypto_lskcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the lskcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
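Putting the new lskcipher entry points together, a minimal end-to-end sketch; the availability of "cbc(aes)" as an lskcipher is an assumption here, and error handling is trimmed to the essentials:

	#include <crypto/skcipher.h>
	#include <linux/slab.h>

	static int lskcipher_roundtrip(const u8 *key, unsigned int keylen,
				       const u8 *src, u8 *dst, unsigned int len)
	{
		struct crypto_lskcipher *tfm;
		u8 *siv;
		int err;

		tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_lskcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		/* siv holds the IV followed by the algorithm's internal state */
		siv = kzalloc(crypto_lskcipher_ivsize(tfm) +
			      crypto_lskcipher_statesize(tfm), GFP_KERNEL);
		if (!siv) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		/* >= 0 on success: the number of trailing bytes left
		 * unprocessed (0 here if len is block-aligned) */
		err = crypto_lskcipher_encrypt(tfm, src, dst, len, siv);

		kfree(siv);
	out_free_tfm:
		crypto_free_lskcipher(tfm);
		return err;
	}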
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen); + static inline unsigned int crypto_skcipher_min_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->min_keysize; + return crypto_skcipher_alg_common(tfm)->min_keysize; } static inline unsigned int crypto_skcipher_max_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->max_keysize; + return crypto_skcipher_alg_common(tfm)->max_keysize; +} + +static inline unsigned int crypto_lskcipher_min_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.min_keysize; +} + +static inline unsigned int crypto_lskcipher_max_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.max_keysize; } /** @@ -458,6 +731,78 @@ int crypto_skcipher_encrypt(struct skcipher_request *req); int crypto_skcipher_decrypt(struct skcipher_request *req); /** + * crypto_skcipher_export() - export partial state + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the operation + * @out: output buffer of sufficient size that can hold the state + * + * Export partial state of the transformation. This function dumps the + * entire state of the ongoing transformation into a provided block of + * data so it can be @import 'ed back later on. This is useful in case + * you want to save partial result of the transformation after + * processing certain amount of data and reload this partial result + * multiple times later on for multiple re-use. No data processing + * happens at this point. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_skcipher_export(struct skcipher_request *req, void *out); + +/** + * crypto_skcipher_import() - import partial state + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the operation + * @in: buffer holding the state + * + * Import partial state of the transformation. This function loads the + * entire state of the ongoing transformation from a provided block of + * data so the transformation can continue from this point onward. No + * data processing happens at this point. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_skcipher_import(struct skcipher_request *req, const void *in); + +/** + * crypto_lskcipher_encrypt() - encrypt plaintext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @siv: IV + state for the cipher operation. The length of the IV must + * comply with the IV size defined by crypto_lskcipher_ivsize. The + * IV is then followed with a buffer with the length as specified by + * crypto_lskcipher_statesize. + * Encrypt plaintext data using the lskcipher handle. + * + * Return: >=0 if the cipher operation was successful, if positive + * then this many bytes have been left unprocessed; + * < 0 if an error occurred + */ +int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *siv); + +/** + * crypto_lskcipher_decrypt() - decrypt ciphertext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @siv: IV + state for the cipher operation. The length of the IV must + * comply with the IV size defined by crypto_lskcipher_ivsize. 
The + * IV is then followed with a buffer with the length as specified by + * crypto_lskcipher_statesize. + * + * Decrypt ciphertext data using the lskcipher handle. + * + * Return: >=0 if the cipher operation was successful, if positive + * then this many bytes have been left unprocessed; + * < 0 if an error occurred + */ +int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *siv); + +/** * DOC: Symmetric Key Cipher Request Handle * * The skcipher_request data structure contains all pointers to data diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 90b69270f2fa..724c45e3e9a7 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -68,4 +68,9 @@ enum amd_asic_type { extern const char *amdgpu_asic_name[]; +struct amdgpu_asic_type_quirk { + unsigned short device; /* PCI device ID */ + u8 revision; /* revision ID */ + unsigned short type; /* real ASIC type */ +}; #endif /*__AMD_ASIC_TYPE_H__ */ diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h new file mode 100644 index 000000000000..c4c423e97f06 --- /dev/null +++ b/include/drm/bridge/aux-bridge.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2023 Linaro Ltd. + * + * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org> + */ +#ifndef DRM_AUX_BRIDGE_H +#define DRM_AUX_BRIDGE_H + +#include <drm/drm_connector.h> + +#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE) +int drm_aux_bridge_register(struct device *parent); +#else +static inline int drm_aux_bridge_register(struct device *parent) +{ + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE) +struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np); +void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status); +#else +static inline struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np) +{ + return NULL; +} + +static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status) +{ +} +#endif + +#endif diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h index 5286a53a1875..65d5e68065e3 100644 --- a/include/drm/bridge/dw_mipi_dsi.h +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -11,6 +11,10 @@ #include <linux/types.h> +#include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> #include <drm/drm_modes.h> struct drm_display_mode; @@ -55,6 +59,17 @@ struct dw_mipi_dsi_plat_data { unsigned long mode_flags, u32 lanes, u32 format); + bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + u32 *(*get_input_bus_fmts)(void *priv_data, + struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + const struct dw_mipi_dsi_phy_ops *phy_ops; const struct dw_mipi_dsi_host_ops *host_ops; @@ -68,5 +83,6 @@ void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi); int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder); void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi); void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave); +struct drm_bridge *dw_mipi_dsi_get_bridge(struct dw_mipi_dsi *dsi); #endif /* __DW_MIPI_DSI__ */ diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h index 
05100e91ecb9..e0c105051246 100644 --- a/include/drm/bridge/samsung-dsim.h +++ b/include/drm/bridge/samsung-dsim.h @@ -53,6 +53,7 @@ struct samsung_dsim_driver_data { unsigned int plltmr_reg; unsigned int has_freqband:1; unsigned int has_clklane_stop:1; + unsigned int has_broken_fifoctrl_emptyhdr:1; unsigned int num_clks; unsigned int min_freq; unsigned int max_freq; @@ -60,6 +61,8 @@ struct samsung_dsim_driver_data { unsigned int num_bits_resol; unsigned int pll_p_offset; const unsigned int *reg_values; + unsigned int pll_fin_min; + unsigned int pll_fin_max; u16 m_min; u16 m_max; }; @@ -87,6 +90,7 @@ struct samsung_dsim { void __iomem *reg_base; struct phy *phy; struct clk **clks; + struct clk *pll_clk; struct regulator_bulk_data supplies[2]; int irq; struct gpio_desc *te_gpio; @@ -115,7 +119,7 @@ struct samsung_dsim { }; extern int samsung_dsim_probe(struct platform_device *pdev); -extern int samsung_dsim_remove(struct platform_device *pdev); +extern void samsung_dsim_remove(struct platform_device *pdev); extern const struct dev_pm_ops samsung_dsim_pm_ops; #endif /* __SAMSUNG_DSIM__ */ diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index e69cece404b3..3731828825bd 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -148,6 +148,7 @@ #define DP_RECEIVE_PORT_0_CAP_0 0x008 # define DP_LOCAL_EDID_PRESENT (1 << 1) # define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) +# define DP_HBLANK_EXPANSION_CAPABLE (1 << 3) #define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 @@ -543,6 +544,10 @@ /* DFP Capability Extension */ #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */ +#define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */ +# define DP_PANEL_REPLAY_SUPPORT (1 << 0) +# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1) + /* Link Configuration */ #define DP_LINK_BW_SET 0x100 # define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ @@ -646,6 +651,9 @@ # define DP_LINK_QUAL_PATTERN_PRSBS31 0x38 # define DP_LINK_QUAL_PATTERN_CUSTOM 0x40 # define DP_LINK_QUAL_PATTERN_SQUARE 0x48 +# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49 +# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a +# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b #define DP_TRAINING_LANE0_1_SET2 0x10f #define DP_TRAINING_LANE2_3_SET2 0x110 @@ -699,6 +707,7 @@ #define DP_DSC_ENABLE 0x160 /* DP 1.4 */ # define DP_DECOMPRESSION_EN (1 << 0) +# define DP_DSC_PASSTHROUGH_EN (1 << 1) #define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */ #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? 
*/ @@ -716,6 +725,13 @@ #define DP_BRANCH_DEVICE_CTRL 0x1a1 # define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) +#define PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */ +# define DP_PANEL_REPLAY_ENABLE (1 << 0) +# define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3) +# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4) +# define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5) +# define DP_PANEL_REPLAY_SU_ENABLE (1 << 6) + #define DP_PAYLOAD_ALLOCATE_SET 0x1c0 #define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 #define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 @@ -1105,6 +1121,18 @@ #define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ #define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ +#define DP_PANEL_REPLAY_ERROR_STATUS 0x2020 /* DP 2.1*/ +# define DP_PANEL_REPLAY_LINK_CRC_ERROR (1 << 0) +# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR (1 << 1) +# define DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) + +#define DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS 0x2022 /* DP 2.1 */ +# define DP_SINK_DEVICE_PANEL_REPLAY_STATUS_MASK (7 << 0) +# define DP_SINK_FRAME_LOCKED_SHIFT 3 +# define DP_SINK_FRAME_LOCKED_MASK (3 << 3) +# define DP_SINK_FRAME_LOCKED_STATUS_VALID_SHIFT 5 +# define DP_SINK_FRAME_LOCKED_STATUS_VALID_MASK (1 << 5) + /* Extended Receiver Capability: See DP_DPCD_REV for definitions */ #define DP_DP13_DPCD_REV 0x2200 diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 86f24a759268..863b2e7add29 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -164,6 +164,7 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) } /* DP/eDP DSC support */ +u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], bool is_edp); u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); @@ -251,6 +252,19 @@ drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); } +/** + * drm_dp_is_uhbr_rate - Determine if a link rate is UHBR + * @link_rate: link rate in 10kbits/s units + * + * Determine if the provided link rate is an UHBR rate. + * + * Returns: %True if @link_rate is an UHBR rate. 
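Because DPCD link rates are carried in 10 kbit/s units, the 1000000 threshold used by the helper defined just below corresponds to UHBR10 (10 Gbit/s); illustrative values:

	drm_dp_is_uhbr_rate(810000);	/* HBR3, 8.1 Gbit/s -> false */
	drm_dp_is_uhbr_rate(1000000);	/* UHBR10, 10 Gbit/s -> true */
	drm_dp_is_uhbr_rate(1350000);	/* UHBR13.5, 13.5 Gbit/s -> true */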
+ */ +static inline bool drm_dp_is_uhbr_rate(int link_rate) +{ + return link_rate >= 1000000; +} + /* * DisplayPort AUX channel */ @@ -272,8 +286,8 @@ struct drm_dp_aux_msg { }; struct cec_adapter; -struct edid; struct drm_connector; +struct drm_edid; /** * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX @@ -507,18 +521,18 @@ bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], u8 type); bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -530,7 +544,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid, + const struct drm_edid *drm_edid, struct drm_dp_aux *aux); enum drm_mode_subconnector drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -632,6 +646,13 @@ enum drm_dp_quirk { * the DP_MAX_LINK_RATE register reporting a lower max multiplier. */ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, + /** + * @DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC: + * + * The device applies HBLANK expansion for some modes, but this + * requires enabling DSC. 
+ */ + DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC, }; /** @@ -699,6 +720,7 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux); void drm_dp_cec_register_connector(struct drm_dp_aux *aux, struct drm_connector *connector); void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address); void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); #else @@ -716,6 +738,11 @@ static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) { } +static inline void drm_dp_cec_attach(struct drm_dp_aux *aux, + u16 source_physical_address) +{ +} + static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) { @@ -775,4 +802,15 @@ bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZ const u8 port_cap[4], u8 color_spc); int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); +#define DRM_DP_BW_OVERHEAD_MST BIT(0) +#define DRM_DP_BW_OVERHEAD_UHBR BIT(1) +#define DRM_DP_BW_OVERHEAD_SSC_REF_CLK BIT(2) +#define DRM_DP_BW_OVERHEAD_FEC BIT(3) +#define DRM_DP_BW_OVERHEAD_DSC BIT(4) + +int drm_dp_bw_overhead(int lane_count, int hactive, + int dsc_slice_count, + int bpp_x16, unsigned long flags); +int drm_dp_bw_channel_coding_efficiency(bool is_uhbr); + #endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index ed5c9660563c..9b19d8bd520a 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -25,6 +25,7 @@ #include <linux/types.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic.h> +#include <drm/drm_fixed.h> #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) #include <linux/stackdepot.h> @@ -46,6 +47,13 @@ struct drm_dp_mst_topology_ref_history { }; #endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */ +enum drm_dp_mst_payload_allocation { + DRM_DP_MST_PAYLOAD_ALLOCATION_NONE, + DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL, + DRM_DP_MST_PAYLOAD_ALLOCATION_DFP, + DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE, +}; + struct drm_dp_mst_branch; /** @@ -537,7 +545,7 @@ struct drm_dp_mst_atomic_payload { * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the * previous MST states payload start slots have been copied over to the new state. Note * that a new start slot won't be assigned/removed from this payload until - * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called. + * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called. * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to * get committed to hardware by calling drm_crtc_commit_wait() on each of the * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps. @@ -564,6 +572,9 @@ struct drm_dp_mst_atomic_payload { /** @dsc_enabled: Whether or not this payload has DSC enabled */ bool dsc_enabled : 1; + /** @payload_allocation_status: The allocation status of this payload */ + enum drm_dp_mst_payload_allocation payload_allocation_status; + /** @next: The list node for this payload */ struct list_head next; }; @@ -607,7 +618,7 @@ struct drm_dp_mst_topology_state { * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this * out itself. 
*/ - int pbn_div; + fixed20_12 pbn_div; }; #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) @@ -829,10 +840,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, - int link_rate, int link_lane_count); +fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count); -int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); +int drm_dp_calc_pbn_mode(int clock, int bpp); void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap); @@ -842,10 +853,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, struct drm_atomic_state *state, struct drm_dp_mst_atomic_payload *payload); -void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_topology_state *mst_state, - const struct drm_dp_mst_atomic_payload *old_payload, - struct drm_dp_mst_atomic_payload *new_payload); +void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload); +void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + const struct drm_dp_mst_atomic_payload *old_payload, + struct drm_dp_mst_atomic_payload *new_payload); int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); @@ -879,6 +893,9 @@ drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_atomic_payload * drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state, struct drm_dp_mst_port *port); +bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_port *parent); int __must_check drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, @@ -900,6 +917,10 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, struct drm_dp_query_stream_enc_status_ack_reply *status); +int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_port **failing_port); int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state, struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h index d4955062c77e..f4d3784b1dce 100644 --- a/include/drm/drm_accel.h +++ b/include/drm/drm_accel.h @@ -58,7 +58,8 @@ int accel_minor_alloc(void); void accel_minor_replace(struct drm_minor *minor, int index); void accel_set_device_instance_params(struct device *kdev, int index); int accel_open(struct inode *inode, struct file *filp); -void accel_debugfs_init(struct drm_minor *minor, int minor_id); +void accel_debugfs_init(struct drm_device *dev); +void accel_debugfs_register(struct drm_device *dev); #else @@ -89,7 +90,11 @@ static inline void accel_set_device_instance_params(struct device *kdev, int ind { } -static inline void accel_debugfs_init(struct drm_minor *minor, int minor_id) +static 
inline void accel_debugfs_init(struct drm_device *dev) +{ +} + +static inline void accel_debugfs_register(struct drm_device *dev) { } diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 9a022caacf93..cf8e1220a4ac 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -1126,7 +1126,7 @@ struct drm_bridge_state { struct drm_bus_cfg input_bus_cfg; /** - * @output_bus_cfg: input bus configuration + * @output_bus_cfg: output bus configuration */ struct drm_bus_cfg output_bus_cfg; }; diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 536a0b0091c3..9aa0a05aa072 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -49,9 +49,8 @@ struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); -int -drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder, - struct drm_connector_state *conn_state); +int drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector, + struct drm_atomic_state *state); int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, const struct drm_crtc_state *crtc_state, int min_scale, @@ -97,6 +96,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state); +void drm_atomic_helper_unprepare_planes(struct drm_device *dev, + struct drm_atomic_state *state); #define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0) #define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1) diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index ba248ca8866f..50131383ed81 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -33,24 +33,6 @@ #include <linux/wait.h> struct drm_file; -struct drm_hw_lock; - -/* - * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for - * include ordering reasons. - * - * DO NOT USE. - */ -struct drm_lock_data { - struct drm_hw_lock *hw_lock; - struct drm_file *file_priv; - wait_queue_head_t lock_queue; - unsigned long lock_time; - spinlock_t spinlock; - uint32_t kernel_waiters; - uint32_t user_waiters; - int idle_has_lock; -}; /** * struct drm_master - drm master structure @@ -145,10 +127,6 @@ struct drm_master { * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. */ struct idr lessee_idr; - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - struct drm_lock_data lock; -#endif }; struct drm_master *drm_master_get(struct drm_master *master); diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index c339fc85fd07..e39da5807ba7 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -32,6 +32,8 @@ #include <drm/drm_mode_object.h> #include <drm/drm_modes.h> +struct device_node; + struct drm_bridge; struct drm_bridge_timings; struct drm_connector; @@ -192,7 +194,7 @@ struct drm_bridge_funcs { * or &drm_encoder_helper_funcs.dpms hook. * * The bridge must assume that the display pipe (i.e. clocks and timing - * singals) feeding it is no longer running when this callback is + * signals) feeding it is no longer running when this callback is * called. * * The @post_disable callback is optional. 
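To make the drm_color_lut_extract() change above concrete: the helper now scales a 16-bit user value by (2^bits - 1)/(2^16 - 1) with round-to-nearest, per the OpenGL fixed-point conversion rules, instead of shifting and clamping. Worked values for 10-bit hardware precision:

	drm_color_lut_extract(0x0000, 10);	/* DIV_ROUND_CLOSEST(0 * 1023, 65535) = 0 */
	drm_color_lut_extract(0x8000, 10);	/* DIV_ROUND_CLOSEST(32768 * 1023, 65535) = 512 */
	drm_color_lut_extract(0xffff, 10);	/* DIV_ROUND_CLOSEST(65535 * 1023, 65535) = 1023, no clamp needed */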
@@ -716,10 +718,8 @@ struct drm_bridge { struct drm_encoder *encoder; /** @chain_node: used to form a bridge chain */ struct list_head chain_node; -#ifdef CONFIG_OF /** @of_node: device node pointer to the bridge */ struct device_node *of_node; -#endif /** @list: to keep track of all added bridges */ struct list_head list; /** @@ -950,6 +950,4 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif -void drm_bridge_debugfs_init(struct drm_minor *minor); - #endif diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h index 572077ff8ae7..a5b39fc01003 100644 --- a/include/drm/drm_buddy.h +++ b/include/drm/drm_buddy.h @@ -22,8 +22,9 @@ start__ >= max__ || size__ > max__ - start__; \ }) -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0) -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1) +#define DRM_BUDDY_RANGE_ALLOCATION BIT(0) +#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1) +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) struct drm_buddy_block { #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); void drm_buddy_block_print(struct drm_buddy *mm, struct drm_buddy_block *block, struct drm_printer *p); - #endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index c0a14b40c039..d47458ecdac4 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -195,6 +195,6 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); drm_for_each_connector_iter(connector, iter) \ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) -void drm_client_debugfs_init(struct drm_minor *minor); +void drm_client_debugfs_init(struct drm_device *dev); #endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index 81c298488b0c..ed81741036d7 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -24,6 +24,7 @@ #define __DRM_COLOR_MGMT_H__ #include <linux/ctype.h> +#include <linux/math64.h> #include <drm/drm_property.h> struct drm_crtc; @@ -36,20 +37,17 @@ struct drm_plane; * * Extract a degamma/gamma LUT value provided by user (in the form of * &drm_color_lut entries) and round it to the precision supported by the - * hardware. + * hardware, following OpenGL int<->float conversion rules + * (see eg. OpenGL 4.6 specification - 2.3.5 Fixed-Point Data Conversions). */ static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision) { - u32 val = user_input; - u32 max = 0xffff >> (16 - bit_precision); - - /* Round only if we're not using full precision. */ - if (bit_precision < 16) { - val += 1UL << (16 - bit_precision - 1); - val >>= 16 - bit_precision; - } - - return clamp_val(val, 0, max); + if (bit_precision > 16) + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(user_input, (1 << bit_precision) - 1), + (1 << 16) - 1); + else + return DIV_ROUND_CLOSEST(user_input * ((1 << bit_precision) - 1), + (1 << 16) - 1); } u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index d300fde6c1a4..fe88d7fc6b8f 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -498,6 +498,8 @@ enum drm_privacy_screen_status { * ITU-R BT.601 colorimetry format * The DP spec does not say whether this is the 525 or the 625 * line version. 
+ * @DRM_MODE_COLORIMETRY_COUNT: + * Not a valid value; merely used for counting */ enum drm_colorspace { /* For Default case, driver will set the colorspace */ @@ -522,7 +524,6 @@ enum drm_colorspace { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13, DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14, DRM_MODE_COLORIMETRY_BT601_YCC = 15, - /* not a valid value; merely used for counting */ DRM_MODE_COLORIMETRY_COUNT }; @@ -816,6 +817,14 @@ struct drm_display_info { * @quirks: EDID based quirks. Internal to EDID parsing. */ u32 quirks; + + /** + * @source_physical_address: Source Physical Address from HDMI + * Vendor-Specific Data Block, for CEC usage. + * + * Defaults to CEC_PHYS_ADDR_INVALID (0xffff). + */ + u16 source_physical_address; }; int drm_display_info_set_bus_formats(struct drm_display_info *info, @@ -1327,7 +1336,8 @@ struct drm_connector_funcs { * This will get called when a hotplug-event for a drm-connector * has been received from a source outside the display driver / device. */ - void (*oob_hotplug_event)(struct drm_connector *connector); + void (*oob_hotplug_event)(struct drm_connector *connector, + enum drm_connector_status status); /** * @debugfs_init: @@ -1971,7 +1981,8 @@ drm_connector_is_unregistered(struct drm_connector *connector) DRM_CONNECTOR_UNREGISTERED; } -void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode); +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode, + enum drm_connector_status status); const char *drm_get_connector_type_name(unsigned int connector_type); const char *drm_get_connector_status_name(enum drm_connector_status status); const char *drm_get_subpixel_order_name(enum subpixel_order order); diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h index cb2c1956a214..cf06cee4343f 100644 --- a/include/drm/drm_debugfs.h +++ b/include/drm/drm_debugfs.h @@ -35,7 +35,7 @@ #include <linux/types.h> #include <linux/seq_file.h> -#include <drm/drm_gpuva_mgr.h> +#include <drm/drm_gpuvm.h> /** * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space @@ -142,8 +142,8 @@ struct drm_debugfs_entry { void drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, struct drm_minor *minor); -int drm_debugfs_remove_files(const struct drm_info_list *files, - int count, struct drm_minor *minor); +int drm_debugfs_remove_files(const struct drm_info_list *files, int count, + struct dentry *root, struct drm_minor *minor); void drm_debugfs_add_file(struct drm_device *dev, const char *name, int (*show)(struct seq_file*, void*), void *data); @@ -152,7 +152,7 @@ void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count); int drm_debugfs_gpuva_info(struct seq_file *m, - struct drm_gpuva_manager *mgr); + struct drm_gpuvm *gpuvm); #else static inline void drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, @@ -160,7 +160,8 @@ static inline void drm_debugfs_create_files(const struct drm_info_list *files, {} static inline int drm_debugfs_remove_files(const struct drm_info_list *files, - int count, struct drm_minor *minor) + int count, struct dentry *root, + struct drm_minor *minor) { return 0; } @@ -176,7 +177,7 @@ static inline void drm_debugfs_add_files(struct drm_device *dev, {} static inline int drm_debugfs_gpuva_info(struct seq_file *m, - struct drm_gpuva_manager *mgr) + struct drm_gpuvm *gpuvm) { return 0; } diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 7cf4afae2e79..63767cf24371
100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -6,7 +6,6 @@ #include <linux/mutex.h> #include <linux/idr.h> -#include <drm/drm_legacy.h> #include <drm/drm_mode_config.h> struct drm_driver; @@ -153,8 +152,8 @@ struct drm_device { * * Lock for others (not &drm_minor.master and &drm_file.is_master) * - * WARNING: - * Only drivers annotated with DRIVER_LEGACY should be using this. + * TODO: This lock used to be the BKL of the DRM subsystem. Move the + * lock into i915, which is the only remaining user. */ struct mutex struct_mutex; @@ -312,85 +311,11 @@ struct drm_device { struct drm_fb_helper *fb_helper; /** - * @debugfs_mutex: + * @debugfs_root: * - * Protects &debugfs_list access. + * Root directory for debugfs files. */ - struct mutex debugfs_mutex; - - /** - * @debugfs_list: - * - * List of debugfs files to be created by the DRM device. The files - * must be added during drm_dev_register(). - */ - struct list_head debugfs_list; - - /* Everything below here is for legacy driver, never use! */ - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - /* List of devices per driver for stealth attach cleanup */ - struct list_head legacy_dev_list; - -#ifdef __alpha__ - /** @hose: PCI hose, only used on ALPHA platforms. */ - struct pci_controller *hose; -#endif - - /* AGP data */ - struct drm_agp_head *agp; - - /* Context handle management - linked list of context handles */ - struct list_head ctxlist; - - /* Context handle management - mutex for &ctxlist */ - struct mutex ctxlist_mutex; - - /* Context handle management */ - struct idr ctx_idr; - - /* Memory management - linked list of regions */ - struct list_head maplist; - - /* Memory management - user token hash table for maps */ - struct drm_open_hash map_hash; - - /* Context handle management - list of vmas (for debugging) */ - struct list_head vmalist; - - /* Optional pointer for DMA support */ - struct drm_device_dma *dma; - - /* Context swapping flag */ - __volatile__ long context_flag; - - /* Last current context */ - int last_context; - - /* Lock for &buf_use and a few other things. */ - spinlock_t buf_lock; - - /* Usage counter for buffers in use -- cannot alloc */ - int buf_use; - - /* Buffer allocation in progress */ - atomic_t buf_alloc; - - struct { - int context; - struct drm_hw_lock *lock; - } sigdata; - - struct drm_local_map *agp_buffer_map; - unsigned int agp_buffer_token; - - /* Scatter gather memory */ - struct drm_sg_mem *sg; - - /* IRQs */ - bool irq_enabled; - int irq; -#endif + struct dentry *debugfs_root; }; #endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 9813fa759b75..8878260d7529 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -110,6 +110,15 @@ enum drm_driver_feature { * Driver supports user defined GPU VA bindings for GEM objects. */ DRIVER_GEM_GPUVA = BIT(8), + /** + * @DRIVER_CURSOR_HOTSPOT: + * + * Driver supports and requires cursor hotspot information in the + * cursor plane (e.g. cursor plane has to actually track the mouse + * cursor and the clients are required to set hotspot in order for + * the cursor planes to work correctly). + */ + DRIVER_CURSOR_HOTSPOT = BIT(9), /* IMPORTANT: Below are all the legacy flags, add new ones above. */ @@ -433,25 +442,6 @@ struct drm_driver { * some examples. */ const struct file_operations *fops; - -#ifdef CONFIG_DRM_LEGACY - /* Everything below here is for legacy driver, never use! 
*/ - /* private: */ - - int (*firstopen) (struct drm_device *); - void (*preclose) (struct drm_device *, struct drm_file *file_priv); - int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); - int (*dma_quiescent) (struct drm_device *); - int (*context_dtor) (struct drm_device *dev, int context); - irqreturn_t (*irq_handler)(int irq, void *arg); - void (*irq_preinstall)(struct drm_device *dev); - int (*irq_postinstall)(struct drm_device *dev); - void (*irq_uninstall)(struct drm_device *dev); - u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); - int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); - void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); - int dev_priv_size; -#endif }; void *__devm_drm_dev_alloc(struct device *parent, @@ -581,4 +571,12 @@ static inline bool drm_firmware_drivers_only(void) return video_firmware_drivers_only(); } +#if defined(CONFIG_DEBUG_FS) +void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root); +#else +static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root) +{ +} +#endif + #endif diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 48e93f909ef6..518d1b8106c7 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -269,64 +269,6 @@ struct detailed_timing { #define DRM_EDID_DSC_MAX_SLICES 0xf #define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f -/* ELD Header Block */ -#define DRM_ELD_HEADER_BLOCK_SIZE 4 - -#define DRM_ELD_VER 0 -# define DRM_ELD_VER_SHIFT 3 -# define DRM_ELD_VER_MASK (0x1f << 3) -# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ -# define DRM_ELD_VER_CANNED (0x1f << 3) - -#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ - -/* ELD Baseline Block for ELD_Ver == 2 */ -#define DRM_ELD_CEA_EDID_VER_MNL 4 -# define DRM_ELD_CEA_EDID_VER_SHIFT 5 -# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) -# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) -# define DRM_ELD_MNL_SHIFT 0 -# define DRM_ELD_MNL_MASK (0x1f << 0) - -#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 -# define DRM_ELD_SAD_COUNT_SHIFT 4 -# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) -# define DRM_ELD_CONN_TYPE_SHIFT 2 -# define DRM_ELD_CONN_TYPE_MASK (3 << 2) -# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) -# define DRM_ELD_CONN_TYPE_DP (1 << 2) -# define DRM_ELD_SUPPORTS_AI (1 << 1) -# define DRM_ELD_SUPPORTS_HDCP (1 << 0) - -#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ -# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ - -#define DRM_ELD_SPEAKER 7 -# define DRM_ELD_SPEAKER_MASK 0x7f -# define DRM_ELD_SPEAKER_RLRC (1 << 6) -# define DRM_ELD_SPEAKER_FLRC (1 << 5) -# define DRM_ELD_SPEAKER_RC (1 << 4) -# define DRM_ELD_SPEAKER_RLR (1 << 3) -# define DRM_ELD_SPEAKER_FC (1 << 2) -# define DRM_ELD_SPEAKER_LFE (1 << 1) -# define DRM_ELD_SPEAKER_FLR (1 << 0) - -#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ -# define DRM_ELD_PORT_ID_LEN 8 - -#define DRM_ELD_MANUFACTURER_NAME0 16 -#define DRM_ELD_MANUFACTURER_NAME1 17 - -#define DRM_ELD_PRODUCT_CODE0 18 -#define DRM_ELD_PRODUCT_CODE1 19 - -#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ - -#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) - struct edid { u8 header[8]; /* Vendor & product info */ @@ -387,11 +329,6 @@ int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct 
drm_connector *connector, const struct drm_display_mode *mode); -#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE -int __drm_set_edid_firmware_path(const char *path); -int __drm_get_edid_firmware_path(char *buf, size_t bufsize); -#endif - bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2); int @@ -410,96 +347,6 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, enum hdmi_quantization_range rgb_quant_range); /** - * drm_eld_mnl - Get ELD monitor name length in bytes. - * @eld: pointer to an eld memory structure with mnl set - */ -static inline int drm_eld_mnl(const uint8_t *eld) -{ - return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; -} - -/** - * drm_eld_sad - Get ELD SAD structures. - * @eld: pointer to an eld memory structure with sad_count set - */ -static inline const uint8_t *drm_eld_sad(const uint8_t *eld) -{ - unsigned int ver, mnl; - - ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; - if (ver != 2 && ver != 31) - return NULL; - - mnl = drm_eld_mnl(eld); - if (mnl > 16) - return NULL; - - return eld + DRM_ELD_CEA_SAD(mnl, 0); -} - -/** - * drm_eld_sad_count - Get ELD SAD count. - * @eld: pointer to an eld memory structure with sad_count set - */ -static inline int drm_eld_sad_count(const uint8_t *eld) -{ - return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> - DRM_ELD_SAD_COUNT_SHIFT; -} - -/** - * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes - * @eld: pointer to an eld memory structure with mnl and sad_count set - * - * This is a helper for determining the payload size of the baseline block, in - * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. - */ -static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) -{ - return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + - drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; -} - -/** - * drm_eld_size - Get ELD size in bytes - * @eld: pointer to a complete eld memory structure - * - * The returned value does not include the vendor block. It's vendor specific, - * and comprises of the remaining bytes in the ELD memory buffer after - * drm_eld_size() bytes of header and baseline block. - * - * The returned value is guaranteed to be a multiple of 4. - */ -static inline int drm_eld_size(const uint8_t *eld) -{ - return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; -} - -/** - * drm_eld_get_spk_alloc - Get speaker allocation - * @eld: pointer to an ELD memory structure - * - * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER - * field definitions to identify speakers. - */ -static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) -{ - return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; -} - -/** - * drm_eld_get_conn_type - Get device type hdmi/dp connected - * @eld: pointer to an ELD memory structure - * - * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to - * identify the display type connected. 
- */ -static inline u8 drm_eld_get_conn_type(const uint8_t *eld) -{ - return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; -} - -/** * drm_edid_decode_mfg_id - Decode the manufacturer ID * @mfg_id: The manufacturer ID * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0' @@ -612,6 +459,7 @@ const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector, int drm_edid_connector_update(struct drm_connector *connector, const struct drm_edid *edid); int drm_edid_connector_add_modes(struct drm_connector *connector); +bool drm_edid_is_digital(const struct drm_edid *drm_edid); const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid, int ext_id, int *ext_index); diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h new file mode 100644 index 000000000000..0a88d10b28b0 --- /dev/null +++ b/include/drm/drm_eld.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __DRM_ELD_H__ +#define __DRM_ELD_H__ + +#include <linux/types.h> + +struct cea_sad; + +/* ELD Header Block */ +#define DRM_ELD_HEADER_BLOCK_SIZE 4 + +#define DRM_ELD_VER 0 +# define DRM_ELD_VER_SHIFT 3 +# define DRM_ELD_VER_MASK (0x1f << 3) +# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ +# define DRM_ELD_VER_CANNED (0x1f << 3) + +#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ + +/* ELD Baseline Block for ELD_Ver == 2 */ +#define DRM_ELD_CEA_EDID_VER_MNL 4 +# define DRM_ELD_CEA_EDID_VER_SHIFT 5 +# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) +# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) +# define DRM_ELD_MNL_SHIFT 0 +# define DRM_ELD_MNL_MASK (0x1f << 0) + +#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 +# define DRM_ELD_SAD_COUNT_SHIFT 4 +# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) +# define DRM_ELD_CONN_TYPE_SHIFT 2 +# define DRM_ELD_CONN_TYPE_MASK (3 << 2) +# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) +# define DRM_ELD_CONN_TYPE_DP (1 << 2) +# define DRM_ELD_SUPPORTS_AI (1 << 1) +# define DRM_ELD_SUPPORTS_HDCP (1 << 0) + +#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ +# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ + +#define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f +# define DRM_ELD_SPEAKER_RLRC (1 << 6) +# define DRM_ELD_SPEAKER_FLRC (1 << 5) +# define DRM_ELD_SPEAKER_RC (1 << 4) +# define DRM_ELD_SPEAKER_RLR (1 << 3) +# define DRM_ELD_SPEAKER_FC (1 << 2) +# define DRM_ELD_SPEAKER_LFE (1 << 1) +# define DRM_ELD_SPEAKER_FLR (1 << 0) + +#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ +# define DRM_ELD_PORT_ID_LEN 8 + +#define DRM_ELD_MANUFACTURER_NAME0 16 +#define DRM_ELD_MANUFACTURER_NAME1 17 + +#define DRM_ELD_PRODUCT_CODE0 18 +#define DRM_ELD_PRODUCT_CODE1 19 + +#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ + +#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) + +/** + * drm_eld_mnl - Get ELD monitor name length in bytes. + * @eld: pointer to an eld memory structure with mnl set + */ +static inline int drm_eld_mnl(const u8 *eld) +{ + return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; +} + +int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad); +int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad); + +/** + * drm_eld_sad - Get ELD SAD structures. 
+ * @eld: pointer to an eld memory structure with sad_count set + */ +static inline const u8 *drm_eld_sad(const u8 *eld) +{ + unsigned int ver, mnl; + + ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; + if (ver != 2 && ver != 31) + return NULL; + + mnl = drm_eld_mnl(eld); + if (mnl > 16) + return NULL; + + return eld + DRM_ELD_CEA_SAD(mnl, 0); +} + +/** + * drm_eld_sad_count - Get ELD SAD count. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline int drm_eld_sad_count(const u8 *eld) +{ + return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> + DRM_ELD_SAD_COUNT_SHIFT; +} + +/** + * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes + * @eld: pointer to an eld memory structure with mnl and sad_count set + * + * This is a helper for determining the payload size of the baseline block, in + * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. + */ +static inline int drm_eld_calc_baseline_block_size(const u8 *eld) +{ + return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; +} + +/** + * drm_eld_size - Get ELD size in bytes + * @eld: pointer to a complete eld memory structure + * + * The returned value does not include the vendor block. It's vendor specific, + * and comprises of the remaining bytes in the ELD memory buffer after + * drm_eld_size() bytes of header and baseline block. + * + * The returned value is guaranteed to be a multiple of 4. + */ +static inline int drm_eld_size(const u8 *eld) +{ + return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; +} + +/** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. + */ +static inline u8 drm_eld_get_spk_alloc(const u8 *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** + * drm_eld_get_conn_type - Get device type hdmi/dp connected + * @eld: pointer to an ELD memory structure + * + * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to + * identify the display type connected. + */ +static inline u8 drm_eld_get_conn_type(const u8 *eld) +{ + return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; +} + +#endif /* __DRM_ELD_H__ */ diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 3a09682af685..977a9381c8ba 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -60,7 +60,7 @@ struct drm_encoder_funcs { * @late_register: * * This optional hook can be used to register additional userspace - * interfaces attached to the encoder like debugfs interfaces. + * interfaces attached to the encoder. * It is called late in the driver load sequence from drm_dev_register(). * Everything added from this callback should be unregistered in * the early_unregister callback. @@ -81,6 +81,13 @@ struct drm_encoder_funcs { * before data structures are torndown. */ void (*early_unregister)(struct drm_encoder *encoder); + + /** + * @debugfs_init: + * + * Allows encoders to create encoder-specific debugfs files. + */ + void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root); }; /** @@ -184,6 +191,13 @@ struct drm_encoder { const struct drm_encoder_funcs *funcs; const struct drm_encoder_helper_funcs *helper_private; + + /** + * @debugfs_entry: + * + * Debugfs directory for this CRTC. 
+ */ + struct dentry *debugfs_entry; }; #define obj_to_encoder(x) container_of(x, struct drm_encoder, base) diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h index e0462361adf9..f1a66c048721 100644 --- a/include/drm/drm_exec.h +++ b/include/drm/drm_exec.h @@ -52,6 +52,20 @@ struct drm_exec { }; /** + * drm_exec_obj() - Return the object for a give drm_exec index + * @exec: Pointer to the drm_exec context + * @index: The index. + * + * Return: Pointer to the locked object corresponding to @index if + * index is within the number of locked objects. NULL otherwise. + */ +static inline struct drm_gem_object * +drm_exec_obj(struct drm_exec *exec, unsigned long index) +{ + return index < exec->num_objects ? exec->objects[index] : NULL; +} + +/** * drm_exec_for_each_locked_object - iterate over all the locked objects * @exec: drm_exec object * @index: unsigned long index for the iteration @@ -59,10 +73,23 @@ struct drm_exec { * * Iterate over all the locked GEM objects inside the drm_exec object. */ -#define drm_exec_for_each_locked_object(exec, index, obj) \ - for (index = 0, obj = (exec)->objects[0]; \ - index < (exec)->num_objects; \ - ++index, obj = (exec)->objects[index]) +#define drm_exec_for_each_locked_object(exec, index, obj) \ + for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index)) + +/** + * drm_exec_for_each_locked_object_reverse - iterate over all the locked + * objects in reverse locking order + * @exec: drm_exec object + * @index: unsigned long index for the iteration + * @obj: the current GEM object + * + * Iterate over all the locked GEM objects inside the drm_exec object in + * reverse locking order. Note that @index may go below zero and wrap, + * but that will be caught by drm_exec_obj(), returning a NULL object. + */ +#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \ + for ((index) = (exec)->num_objects - 1; \ + ((obj) = drm_exec_obj(exec, index)); --(index)) /** * drm_exec_until_all_locked - loop until all GEM objects are locked @@ -108,7 +135,7 @@ static inline bool drm_exec_is_contended(struct drm_exec *exec) return !!exec->contended; } -void drm_exec_init(struct drm_exec *exec, uint32_t flags); +void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr); void drm_exec_fini(struct drm_exec *exec); bool drm_exec_cleanup(struct drm_exec *exec); int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj); diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 010239392adf..ab230d3af138 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -79,10 +79,8 @@ struct drm_minor { struct device *kdev; /* Linux device */ struct drm_device *dev; + struct dentry *debugfs_symlink; struct dentry *debugfs_root; - - struct list_head debugfs_list; - struct mutex debugfs_lock; /* Protects debugfs_list. */ }; /** @@ -229,6 +227,18 @@ struct drm_file { bool is_master; /** + * @supports_virtualized_cursor_plane: + * + * This client is capable of handling the cursor plane with the + * restrictions imposed on it by the virtualized drivers. + * + * This implies that the cursor plane has to behave like a cursor + * i.e. track cursor movement. It also requires setting of the + * hotspot properties by the client on the cursor plane. + */ + bool supports_virtualized_cursor_plane; + + /** * @master: * * Master this node is currently associated with. Protected by struct @@ -256,8 +266,15 @@ struct drm_file { /** @master_lookup_lock: Serializes @master. 
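The drm_exec rework above makes both iteration macros terminate through drm_exec_obj() returning NULL, and drm_exec_init() now takes an object-count hint for preallocation. A minimal locking-loop sketch (objs[] and num_objs are assumed caller-provided; error handling trimmed):

	struct drm_exec exec;
	struct drm_gem_object *obj;
	unsigned long index;
	int i, ret;

	/* num_objs hints how many objects to preallocate slots for;
	 * 0 keeps the default. */
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, num_objs);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < num_objs; i++) {
			ret = drm_exec_lock_obj(&exec, objs[i]);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}

	drm_exec_for_each_locked_object(&exec, index, obj) {
		/* every locked object is visited; the loop stops once
		 * drm_exec_obj() returns NULL past the last index */
	}
out:
	drm_exec_fini(&exec);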
 */
	spinlock_t master_lookup_lock;
 
-	/** @pid: Process that opened this file. */
-	struct pid *pid;
+	/**
+	 * @pid: Process that is using this file.
+	 *
+	 * Must only be dereferenced under an rcu_read_lock or equivalent.
+	 *
+	 * Updates are guarded with dev->filelist_mutex and reference must be
+	 * dropped after an RCU grace period to accommodate lockless readers.
+	 */
+	struct pid __rcu *pid;
 
	/** @client_id: A unique id for fdinfo */
	u64 client_id;
@@ -369,11 +386,6 @@ struct drm_file {
	 * Per-file buffer caches used by the PRIME buffer sharing code.
	 */
	struct drm_prime_file_private prime;
-
-	/* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-	unsigned long lock_count; /* DRI1 legacy lock count */
-#endif
 };
 
 /**
@@ -420,6 +432,8 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
	return file_priv->minor->type == DRM_MINOR_ACCEL;
 }
 
+void drm_file_update_pid(struct drm_file *);
+
 int drm_open(struct inode *inode, struct file *filp);
 int drm_open_helper(struct file *filp, struct drm_minor *minor);
 ssize_t drm_read(struct file *filp, char __user *buffer,
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 21c3d512d25c..1eef3283a109 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -31,11 +31,10 @@
 /**
  * DOC: flip utils
  *
- * Util to queue up work to run from work-queue context after flip/vblank.
+ * Utility to queue up work to run from work-queue context after flip/vblank.
  * Typically this can be used to defer unref of framebuffers, cursor
- * bo's, etc until after vblank. The APIs are all thread-safe.
- * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
- * in atomic context.
+ * BOs, etc. until after vblank. The APIs are all thread-safe. Moreover,
+ * drm_flip_work_commit() can be called in atomic context.
  */
 
 struct drm_flip_work;
@@ -52,16 +51,6 @@ struct drm_flip_work;
 typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
 
 /**
- * struct drm_flip_task - flip work task
- * @node: list entry element
- * @data: data to pass to &drm_flip_work.func
- */
-struct drm_flip_task {
-	struct list_head node;
-	void *data;
-};
-
-/**
  * struct drm_flip_work - flip work queue
  * @name: debug name
  * @func: callback fxn called for each committed item
@@ -79,9 +68,6 @@ struct drm_flip_work {
	spinlock_t lock;
 };
 
-struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
-void drm_flip_work_queue_task(struct drm_flip_work *work,
-			      struct drm_flip_task *task);
 void drm_flip_work_queue(struct drm_flip_work *work, void *val);
 void drm_flip_work_commit(struct drm_flip_work *work,
		struct workqueue_struct *wq);
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 291deb09475b..f13b34e0b752 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -15,6 +15,57 @@
 struct drm_rect;
 
 struct iosys_map;
 
+/**
+ * struct drm_format_conv_state - Stores format-conversion state
+ *
+ * DRM helpers for format conversion store temporary state in
+ * struct drm_format_conv_state. The state's buffer resources can be reused
+ * among multiple conversion operations.
+ *
+ * All fields are considered private.
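Since struct drm_format_conv_state only carries a lazily grown temporary buffer, a shadow-plane or blit path can keep one instance on the stack and reuse it across conversions. A minimal sketch, assuming a hypothetical my_flush_rect() helper and using the initializer and conversion signatures introduced below:

static void my_flush_rect(struct iosys_map *dst, const struct iosys_map *src,
			  const struct drm_framebuffer *fb,
			  const struct drm_rect *clip)
{
	/* Default-initialized state; the buffer is allocated on first use. */
	struct drm_format_conv_state state = DRM_FORMAT_CONV_STATE_INIT;

	/* NULL dst_pitch selects the default pitch; no byte swapping. */
	drm_fb_xrgb8888_to_rgb565(dst, NULL, src, fb, clip, &state, false);

	/* Free the temporary buffer once all conversions are done. */
	drm_format_conv_state_release(&state);
}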
+ */ +struct drm_format_conv_state { + struct { + void *mem; + size_t size; + bool preallocated; + } tmp; +}; + +#define __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, _preallocated) { \ + .tmp = { \ + .mem = (_mem), \ + .size = (_size), \ + .preallocated = (_preallocated), \ + } \ + } + +/** + * DRM_FORMAT_CONV_STATE_INIT - Initializer for struct drm_format_conv_state + * + * Initializes an instance of struct drm_format_conv_state to default values. + */ +#define DRM_FORMAT_CONV_STATE_INIT \ + __DRM_FORMAT_CONV_STATE_INIT(NULL, 0, false) + +/** + * DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED - Initializer for struct drm_format_conv_state + * @_mem: The preallocated memory area + * @_size: The number of bytes in _mem + * + * Initializes an instance of struct drm_format_conv_state to preallocated + * storage. The caller is responsible for releasing the provided memory range. + */ +#define DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(_mem, _size) \ + __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, true) + +void drm_format_conv_state_init(struct drm_format_conv_state *state); +void drm_format_conv_state_copy(struct drm_format_conv_state *state, + const struct drm_format_conv_state *old_state); +void *drm_format_conv_state_reserve(struct drm_format_conv_state *state, + size_t new_size, gfp_t flags); +void drm_format_conv_state_release(struct drm_format_conv_state *state); + unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format, const struct drm_rect *clip); @@ -23,45 +74,49 @@ void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch, const struct drm_rect *clip); void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool cached); + const struct drm_rect *clip, bool cached, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool swab); + const struct drm_rect *clip, struct drm_format_conv_state *state, + bool swab); void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - 
const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *rect); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); size_t drm_fb_build_fourcc_list(struct drm_device *dev, const u32 *native_fourccs, size_t native_nfourccs, diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 532ae78ca747..ccf91daa4307 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -22,6 +22,7 @@ #ifndef __DRM_FOURCC_H__ #define __DRM_FOURCC_H__ +#include <linux/math.h> #include <linux/types.h> #include <uapi/drm/drm_fourcc.h> @@ -279,7 +280,7 @@ int drm_format_info_plane_width(const struct drm_format_info *info, int width, if (plane == 0) return width; - return width / info->hsub; + return DIV_ROUND_UP(width, info->hsub); } /** @@ -301,7 +302,7 @@ int drm_format_info_plane_height(const struct drm_format_info *info, int height, if (plane == 0) return height; - return height / info->vsub; + return DIV_ROUND_UP(height, info->vsub); } const struct drm_format_info *__drm_format_info(u32 format); diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 0dcc07b68654..668077009fce 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -189,18 +189,6 @@ struct drm_framebuffer { */ int flags; /** - * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor - * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR - * universal plane. - */ - int hot_x; - /** - * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor - * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR - * universal plane. - */ - int hot_y; - /** * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. 
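The switch to DIV_ROUND_UP() above matters for odd-sized framebuffers with subsampled formats, where plain integer division used to under-report the chroma plane size. A short illustration, using the existing drm_format_info() lookup:

/* NV12 has hsub = vsub = 2; a 101x101 buffer needs 51x51 chroma samples. */
const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
int cw = drm_format_info_plane_width(info, 101, 1);	/* now 51, previously 50 */
int ch = drm_format_info_plane_height(info, 101, 1);	/* now 51, previously 50 */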
 */
	struct list_head filp_head;
@@ -292,11 +280,6 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
	     &fb->head != (&(dev)->mode_config.fb_list);			\
	     fb = list_next_entry(fb, head))
 
-int drm_framebuffer_plane_width(int width,
-				const struct drm_framebuffer *fb, int plane);
-int drm_framebuffer_plane_height(int height,
-				 const struct drm_framebuffer *fb, int plane);
-
 /**
  * struct drm_afbc_framebuffer - a special afbc frame buffer object
  *
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bc9f6aa2f3fe..369505447acd 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -209,6 +209,15 @@ struct drm_gem_object_funcs {
	enum drm_gem_object_status (*status)(struct drm_gem_object *obj);
 
	/**
+	 * @rss:
+	 *
+	 * Return resident size of the object in physical memory.
+	 *
+	 * Called by drm_show_memory_stats().
+	 */
+	size_t (*rss)(struct drm_gem_object *obj);
+
+	/**
	 * @vm_ops:
	 *
	 * Virtual memory operations used with mmap.
@@ -571,7 +580,7 @@ int drm_gem_evict(struct drm_gem_object *obj);
 * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
 * @obj: the &drm_gem_object
 *
- * This initializes the &drm_gem_object's &drm_gpuva list.
+ * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
 *
 * Calling this function is only necessary for drivers intending to support the
 * &drm_driver_feature DRIVER_GEM_GPUVA.
@@ -584,28 +593,28 @@ static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
 }
 
 /**
- * drm_gem_for_each_gpuva() - iternator to walk over a list of gpuvas
- * @entry__: &drm_gpuva structure to assign to in each iteration step
- * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
- * This iterator walks over all &drm_gpuva structures associated with the
- * &drm_gpuva_manager.
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object.
 */
-#define drm_gem_for_each_gpuva(entry__, obj__) \
-	list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
+#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
+	list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)
 
 /**
- * drm_gem_for_each_gpuva_safe() - iternator to safely walk over a list of
- * gpuvas
- * @entry__: &drm_gpuva structure to assign to in each iteration step
- * @next__: &next &drm_gpuva to store the next step
- * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
+ * &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @next__: &next &drm_gpuvm_bo to store the next step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
 *
- * This iterator walks over all &drm_gpuva structures associated with the
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
 * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
*/ -#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \ - list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry) +#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \ + list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem) #endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index 40b8b039518e..3e01c619a25e 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -5,6 +5,7 @@ #include <linux/iosys-map.h> +#include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> @@ -49,6 +50,15 @@ struct drm_shadow_plane_state { /** @base: plane state */ struct drm_plane_state base; + /** + * @fmtcnv_state: Format-conversion state + * + * Per-plane state for format conversion. + * Flags for copying shadow buffers into backend storage. Also holds + * temporary storage for format conversion. + */ + struct drm_format_conv_state fmtcnv_state; + /* Transitional state - do not export or duplicate */ /** diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuva_mgr.h deleted file mode 100644 index ed8d50200cc3..000000000000 --- a/include/drm/drm_gpuva_mgr.h +++ /dev/null @@ -1,706 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ - -#ifndef __DRM_GPUVA_MGR_H__ -#define __DRM_GPUVA_MGR_H__ - -/* - * Copyright (c) 2022 Red Hat. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <linux/list.h> -#include <linux/rbtree.h> -#include <linux/types.h> - -#include <drm/drm_gem.h> - -struct drm_gpuva_manager; -struct drm_gpuva_fn_ops; - -/** - * enum drm_gpuva_flags - flags for struct drm_gpuva - */ -enum drm_gpuva_flags { - /** - * @DRM_GPUVA_INVALIDATED: - * - * Flag indicating that the &drm_gpuva's backing GEM is invalidated. - */ - DRM_GPUVA_INVALIDATED = (1 << 0), - - /** - * @DRM_GPUVA_SPARSE: - * - * Flag indicating that the &drm_gpuva is a sparse mapping. - */ - DRM_GPUVA_SPARSE = (1 << 1), - - /** - * @DRM_GPUVA_USERBITS: user defined bits - */ - DRM_GPUVA_USERBITS = (1 << 2), -}; - -/** - * struct drm_gpuva - structure to track a GPU VA mapping - * - * This structure represents a GPU VA mapping and is associated with a - * &drm_gpuva_manager. - * - * Typically, this structure is embedded in bigger driver structures. 
- */ -struct drm_gpuva { - /** - * @mgr: the &drm_gpuva_manager this object is associated with - */ - struct drm_gpuva_manager *mgr; - - /** - * @flags: the &drm_gpuva_flags for this mapping - */ - enum drm_gpuva_flags flags; - - /** - * @va: structure containing the address and range of the &drm_gpuva - */ - struct { - /** - * @addr: the start address - */ - u64 addr; - - /* - * @range: the range - */ - u64 range; - } va; - - /** - * @gem: structure containing the &drm_gem_object and it's offset - */ - struct { - /** - * @offset: the offset within the &drm_gem_object - */ - u64 offset; - - /** - * @obj: the mapped &drm_gem_object - */ - struct drm_gem_object *obj; - - /** - * @entry: the &list_head to attach this object to a &drm_gem_object - */ - struct list_head entry; - } gem; - - /** - * @rb: structure containing data to store &drm_gpuvas in a rb-tree - */ - struct { - /** - * @rb: the rb-tree node - */ - struct rb_node node; - - /** - * @entry: The &list_head to additionally connect &drm_gpuvas - * in the same order they appear in the interval tree. This is - * useful to keep iterating &drm_gpuvas from a start node found - * through the rb-tree while doing modifications on the rb-tree - * itself. - */ - struct list_head entry; - - /** - * @__subtree_last: needed by the interval tree, holding last-in-subtree - */ - u64 __subtree_last; - } rb; -}; - -int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va); -void drm_gpuva_remove(struct drm_gpuva *va); - -void drm_gpuva_link(struct drm_gpuva *va); -void drm_gpuva_unlink(struct drm_gpuva *va); - -struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr, - u64 addr, u64 range); -struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr, - u64 addr, u64 range); -struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start); -struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end); - -bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range); - -static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset) -{ - va->va.addr = addr; - va->va.range = range; - va->gem.obj = obj; - va->gem.offset = offset; -} - -/** - * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is - * invalidated - * @va: the &drm_gpuva to set the invalidate flag for - * @invalidate: indicates whether the &drm_gpuva is invalidated - */ -static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate) -{ - if (invalidate) - va->flags |= DRM_GPUVA_INVALIDATED; - else - va->flags &= ~DRM_GPUVA_INVALIDATED; -} - -/** - * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva - * is invalidated - * @va: the &drm_gpuva to check - */ -static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) -{ - return va->flags & DRM_GPUVA_INVALIDATED; -} - -/** - * struct drm_gpuva_manager - DRM GPU VA Manager - * - * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using - * &maple_tree structures. Typically, this structure is embedded in bigger - * driver structures. - * - * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or - * pages. - * - * There should be one manager instance per GPU virtual address space. 
- */ -struct drm_gpuva_manager { - /** - * @name: the name of the DRM GPU VA space - */ - const char *name; - - /** - * @mm_start: start of the VA space - */ - u64 mm_start; - - /** - * @mm_range: length of the VA space - */ - u64 mm_range; - - /** - * @rb: structures to track &drm_gpuva entries - */ - struct { - /** - * @tree: the rb-tree to track GPU VA mappings - */ - struct rb_root_cached tree; - - /** - * @list: the &list_head to track GPU VA mappings - */ - struct list_head list; - } rb; - - /** - * @kernel_alloc_node: - * - * &drm_gpuva representing the address space cutout reserved for - * the kernel - */ - struct drm_gpuva kernel_alloc_node; - - /** - * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers - */ - const struct drm_gpuva_fn_ops *ops; -}; - -void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr, - const char *name, - u64 start_offset, u64 range, - u64 reserve_offset, u64 reserve_range, - const struct drm_gpuva_fn_ops *ops); -void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr); - -static inline struct drm_gpuva * -__drm_gpuva_next(struct drm_gpuva *va) -{ - if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list)) - return list_next_entry(va, rb.entry); - - return NULL; -} - -/** - * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas - * @va__: &drm_gpuva structure to assign to in each iteration step - * @mgr__: &drm_gpuva_manager to walk over - * @start__: starting offset, the first gpuva will overlap this - * @end__: ending offset, the last gpuva will start before this (but may - * overlap) - * - * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie - * between @start__ and @end__. It is implemented similarly to list_for_each(), - * but is using the &drm_gpuva_manager's internal interval tree to accelerate - * the search for the starting &drm_gpuva, and hence isn't safe against removal - * of elements. It assumes that @end__ is within (or is the upper limit of) the - * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's - * @kernel_alloc_node. - */ -#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \ - for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \ - va__ && (va__->va.addr < (end__)); \ - va__ = __drm_gpuva_next(va__)) - -/** - * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of - * &drm_gpuvas - * @va__: &drm_gpuva to assign to in each iteration step - * @next__: another &drm_gpuva to use as temporary storage - * @mgr__: &drm_gpuva_manager to walk over - * @start__: starting offset, the first gpuva will overlap this - * @end__: ending offset, the last gpuva will start before this (but may - * overlap) - * - * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie - * between @start__ and @end__. It is implemented similarly to - * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval - * tree to accelerate the search for the starting &drm_gpuva, and hence is safe - * against removal of elements. It assumes that @end__ is within (or is the - * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the - * &drm_gpuva_manager's @kernel_alloc_node. 
- */ -#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \ - for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \ - next__ = __drm_gpuva_next(va__); \ - va__ && (va__->va.addr < (end__)); \ - va__ = next__, next__ = __drm_gpuva_next(va__)) - -/** - * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas - * @va__: &drm_gpuva to assign to in each iteration step - * @mgr__: &drm_gpuva_manager to walk over - * - * This iterator walks over all &drm_gpuva structures associated with the given - * &drm_gpuva_manager. - */ -#define drm_gpuva_for_each_va(va__, mgr__) \ - list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry) - -/** - * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas - * @va__: &drm_gpuva to assign to in each iteration step - * @next__: another &drm_gpuva to use as temporary storage - * @mgr__: &drm_gpuva_manager to walk over - * - * This iterator walks over all &drm_gpuva structures associated with the given - * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and - * hence safe against the removal of elements. - */ -#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \ - list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry) - -/** - * enum drm_gpuva_op_type - GPU VA operation type - * - * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager. - */ -enum drm_gpuva_op_type { - /** - * @DRM_GPUVA_OP_MAP: the map op type - */ - DRM_GPUVA_OP_MAP, - - /** - * @DRM_GPUVA_OP_REMAP: the remap op type - */ - DRM_GPUVA_OP_REMAP, - - /** - * @DRM_GPUVA_OP_UNMAP: the unmap op type - */ - DRM_GPUVA_OP_UNMAP, - - /** - * @DRM_GPUVA_OP_PREFETCH: the prefetch op type - */ - DRM_GPUVA_OP_PREFETCH, -}; - -/** - * struct drm_gpuva_op_map - GPU VA map operation - * - * This structure represents a single map operation generated by the - * DRM GPU VA manager. - */ -struct drm_gpuva_op_map { - /** - * @va: structure containing address and range of a map - * operation - */ - struct { - /** - * @addr: the base address of the new mapping - */ - u64 addr; - - /** - * @range: the range of the new mapping - */ - u64 range; - } va; - - /** - * @gem: structure containing the &drm_gem_object and it's offset - */ - struct { - /** - * @offset: the offset within the &drm_gem_object - */ - u64 offset; - - /** - * @obj: the &drm_gem_object to map - */ - struct drm_gem_object *obj; - } gem; -}; - -/** - * struct drm_gpuva_op_unmap - GPU VA unmap operation - * - * This structure represents a single unmap operation generated by the - * DRM GPU VA manager. - */ -struct drm_gpuva_op_unmap { - /** - * @va: the &drm_gpuva to unmap - */ - struct drm_gpuva *va; - - /** - * @keep: - * - * Indicates whether this &drm_gpuva is physically contiguous with the - * original mapping request. - * - * Optionally, if &keep is set, drivers may keep the actual page table - * mappings for this &drm_gpuva, adding the missing page table entries - * only and update the &drm_gpuva_manager accordingly. - */ - bool keep; -}; - -/** - * struct drm_gpuva_op_remap - GPU VA remap operation - * - * This represents a single remap operation generated by the DRM GPU VA manager. - * - * A remap operation is generated when an existing GPU VA mmapping is split up - * by inserting a new GPU VA mapping or by partially unmapping existent - * mapping(s), hence it consists of a maximum of two map and one unmap - * operation. - * - * The @unmap operation takes care of removing the original existing mapping. 
- * @prev is used to remap the preceding part, @next the subsequent part. - * - * If either a new mapping's start address is aligned with the start address - * of the old mapping or the new mapping's end address is aligned with the - * end address of the old mapping, either @prev or @next is NULL. - * - * Note, the reason for a dedicated remap operation, rather than arbitrary - * unmap and map operations, is to give drivers the chance of extracting driver - * specific data for creating the new mappings from the unmap operations's - * &drm_gpuva structure which typically is embedded in larger driver specific - * structures. - */ -struct drm_gpuva_op_remap { - /** - * @prev: the preceding part of a split mapping - */ - struct drm_gpuva_op_map *prev; - - /** - * @next: the subsequent part of a split mapping - */ - struct drm_gpuva_op_map *next; - - /** - * @unmap: the unmap operation for the original existing mapping - */ - struct drm_gpuva_op_unmap *unmap; -}; - -/** - * struct drm_gpuva_op_prefetch - GPU VA prefetch operation - * - * This structure represents a single prefetch operation generated by the - * DRM GPU VA manager. - */ -struct drm_gpuva_op_prefetch { - /** - * @va: the &drm_gpuva to prefetch - */ - struct drm_gpuva *va; -}; - -/** - * struct drm_gpuva_op - GPU VA operation - * - * This structure represents a single generic operation. - * - * The particular type of the operation is defined by @op. - */ -struct drm_gpuva_op { - /** - * @entry: - * - * The &list_head used to distribute instances of this struct within - * &drm_gpuva_ops. - */ - struct list_head entry; - - /** - * @op: the type of the operation - */ - enum drm_gpuva_op_type op; - - union { - /** - * @map: the map operation - */ - struct drm_gpuva_op_map map; - - /** - * @remap: the remap operation - */ - struct drm_gpuva_op_remap remap; - - /** - * @unmap: the unmap operation - */ - struct drm_gpuva_op_unmap unmap; - - /** - * @prefetch: the prefetch operation - */ - struct drm_gpuva_op_prefetch prefetch; - }; -}; - -/** - * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op - */ -struct drm_gpuva_ops { - /** - * @list: the &list_head - */ - struct list_head list; -}; - -/** - * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops - * @op: &drm_gpuva_op to assign in each iteration step - * @ops: &drm_gpuva_ops to walk - * - * This iterator walks over all ops within a given list of operations. - */ -#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry) - -/** - * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops - * @op: &drm_gpuva_op to assign in each iteration step - * @next: &next &drm_gpuva_op to store the next step - * @ops: &drm_gpuva_ops to walk - * - * This iterator walks over all ops within a given list of operations. It is - * implemented with list_for_each_safe(), so save against removal of elements. - */ -#define drm_gpuva_for_each_op_safe(op, next, ops) \ - list_for_each_entry_safe(op, next, &(ops)->list, entry) - -/** - * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point - * @op: &drm_gpuva_op to assign in each iteration step - * @ops: &drm_gpuva_ops to walk - * - * This iterator walks over all ops within a given list of operations beginning - * from the given operation in reverse order. 
- */ -#define drm_gpuva_for_each_op_from_reverse(op, ops) \ - list_for_each_entry_from_reverse(op, &(ops)->list, entry) - -/** - * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops - * @ops: the &drm_gpuva_ops to get the fist &drm_gpuva_op from - */ -#define drm_gpuva_first_op(ops) \ - list_first_entry(&(ops)->list, struct drm_gpuva_op, entry) - -/** - * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops - * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from - */ -#define drm_gpuva_last_op(ops) \ - list_last_entry(&(ops)->list, struct drm_gpuva_op, entry) - -/** - * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list - * @op: the current &drm_gpuva_op - */ -#define drm_gpuva_prev_op(op) list_prev_entry(op, entry) - -/** - * drm_gpuva_next_op() - next &drm_gpuva_op in the list - * @op: the current &drm_gpuva_op - */ -#define drm_gpuva_next_op(op) list_next_entry(op, entry) - -struct drm_gpuva_ops * -drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset); -struct drm_gpuva_ops * -drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr, - u64 addr, u64 range); - -struct drm_gpuva_ops * -drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr, - u64 addr, u64 range); - -struct drm_gpuva_ops * -drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr, - struct drm_gem_object *obj); - -void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr, - struct drm_gpuva_ops *ops); - -static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, - struct drm_gpuva_op_map *op) -{ - drm_gpuva_init(va, op->va.addr, op->va.range, - op->gem.obj, op->gem.offset); -} - -/** - * struct drm_gpuva_fn_ops - callbacks for split/merge steps - * - * This structure defines the callbacks used by &drm_gpuva_sm_map and - * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap - * operations to drivers. - */ -struct drm_gpuva_fn_ops { - /** - * @op_alloc: called when the &drm_gpuva_manager allocates - * a struct drm_gpuva_op - * - * Some drivers may want to embed struct drm_gpuva_op into driver - * specific structures. By implementing this callback drivers can - * allocate memory accordingly. - * - * This callback is optional. - */ - struct drm_gpuva_op *(*op_alloc)(void); - - /** - * @op_free: called when the &drm_gpuva_manager frees a - * struct drm_gpuva_op - * - * Some drivers may want to embed struct drm_gpuva_op into driver - * specific structures. By implementing this callback drivers can - * free the previously allocated memory accordingly. - * - * This callback is optional. - */ - void (*op_free)(struct drm_gpuva_op *op); - - /** - * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the - * mapping once all previous steps were completed - * - * The &priv pointer matches the one the driver passed to - * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. - * - * Can be NULL if &drm_gpuva_sm_map is used. - */ - int (*sm_step_map)(struct drm_gpuva_op *op, void *priv); - - /** - * @sm_step_remap: called from &drm_gpuva_sm_map and - * &drm_gpuva_sm_unmap to split up an existent mapping - * - * This callback is called when existent mapping needs to be split up. - * This is the case when either a newly requested mapping overlaps or - * is enclosed by an existent mapping or a partial unmap of an existent - * mapping is requested. - * - * The &priv pointer matches the one the driver passed to - * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. 
- * - * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is - * used. - */ - int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv); - - /** - * @sm_step_unmap: called from &drm_gpuva_sm_map and - * &drm_gpuva_sm_unmap to unmap an existent mapping - * - * This callback is called when existent mapping needs to be unmapped. - * This is the case when either a newly requested mapping encloses an - * existent mapping or an unmap of an existent mapping is requested. - * - * The &priv pointer matches the one the driver passed to - * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. - * - * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is - * used. - */ - int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv); -}; - -int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset); - -int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv, - u64 addr, u64 range); - -void drm_gpuva_map(struct drm_gpuva_manager *mgr, - struct drm_gpuva *va, - struct drm_gpuva_op_map *op); - -void drm_gpuva_remap(struct drm_gpuva *prev, - struct drm_gpuva *next, - struct drm_gpuva_op_remap *op); - -void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op); - -#endif /* __DRM_GPUVA_MGR_H__ */ diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h new file mode 100644 index 000000000000..9060f9fae6f1 --- /dev/null +++ b/include/drm/drm_gpuvm.h @@ -0,0 +1,1247 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ + +#ifndef __DRM_GPUVM_H__ +#define __DRM_GPUVM_H__ + +/* + * Copyright (c) 2022 Red Hat. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/dma-resv.h> +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/types.h> + +#include <drm/drm_device.h> +#include <drm/drm_gem.h> +#include <drm/drm_exec.h> + +struct drm_gpuvm; +struct drm_gpuvm_bo; +struct drm_gpuvm_ops; + +/** + * enum drm_gpuva_flags - flags for struct drm_gpuva + */ +enum drm_gpuva_flags { + /** + * @DRM_GPUVA_INVALIDATED: + * + * Flag indicating that the &drm_gpuva's backing GEM is invalidated. + */ + DRM_GPUVA_INVALIDATED = (1 << 0), + + /** + * @DRM_GPUVA_SPARSE: + * + * Flag indicating that the &drm_gpuva is a sparse mapping. 
+	 */
+	DRM_GPUVA_SPARSE = (1 << 1),
+
+	/**
+	 * @DRM_GPUVA_USERBITS: user defined bits
+	 */
+	DRM_GPUVA_USERBITS = (1 << 2),
+};
+
+/**
+ * struct drm_gpuva - structure to track a GPU VA mapping
+ *
+ * This structure represents a GPU VA mapping and is associated with a
+ * &drm_gpuvm.
+ *
+ * Typically, this structure is embedded in bigger driver structures.
+ */
+struct drm_gpuva {
+	/**
+	 * @vm: the &drm_gpuvm this object is associated with
+	 */
+	struct drm_gpuvm *vm;
+
+	/**
+	 * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
+	 * &drm_gem_object
+	 */
+	struct drm_gpuvm_bo *vm_bo;
+
+	/**
+	 * @flags: the &drm_gpuva_flags for this mapping
+	 */
+	enum drm_gpuva_flags flags;
+
+	/**
+	 * @va: structure containing the address and range of the &drm_gpuva
+	 */
+	struct {
+		/**
+		 * @va.addr: the start address
+		 */
+		u64 addr;
+
+		/**
+		 * @va.range: the range
+		 */
+		u64 range;
+	} va;
+
+	/**
+	 * @gem: structure containing the &drm_gem_object and its offset
+	 */
+	struct {
+		/**
+		 * @gem.offset: the offset within the &drm_gem_object
+		 */
+		u64 offset;
+
+		/**
+		 * @gem.obj: the mapped &drm_gem_object
+		 */
+		struct drm_gem_object *obj;
+
+		/**
+		 * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
+		 */
+		struct list_head entry;
+	} gem;
+
+	/**
+	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
+	 */
+	struct {
+		/**
+		 * @rb.node: the rb-tree node
+		 */
+		struct rb_node node;
+
+		/**
+		 * @rb.entry: The &list_head to additionally connect &drm_gpuvas
+		 * in the same order they appear in the interval tree. This is
+		 * useful to keep iterating &drm_gpuvas from a start node found
+		 * through the rb-tree while doing modifications on the rb-tree
+		 * itself.
+		 */
+		struct list_head entry;
+
+		/**
+		 * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
+		 */
+		u64 __subtree_last;
+	} rb;
+};
+
+int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
+void drm_gpuva_remove(struct drm_gpuva *va);
+
+void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
+void drm_gpuva_unlink(struct drm_gpuva *va);
+
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
+				 u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
+				       u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
+
+static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
+				  struct drm_gem_object *obj, u64 offset)
+{
+	va->va.addr = addr;
+	va->va.range = range;
+	va->gem.obj = obj;
+	va->gem.offset = offset;
+}
+
+/**
+ * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
+ * invalidated
+ * @va: the &drm_gpuva to set the invalidate flag for
+ * @invalidate: indicates whether the &drm_gpuva is invalidated
+ */
+static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
+{
+	if (invalidate)
+		va->flags |= DRM_GPUVA_INVALIDATED;
+	else
+		va->flags &= ~DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
+ * is invalidated
+ * @va: the &drm_gpuva to check
+ *
+ * Returns: %true if the GPU VA is invalidated, %false otherwise
+ */
+static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
+{
+	return va->flags & DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * enum drm_gpuvm_flags - flags for struct drm_gpuvm
+ */
+enum drm_gpuvm_flags {
+	/**
+	 * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by
the
+	 * GPUVM's &dma_resv lock
+	 */
+	DRM_GPUVM_RESV_PROTECTED = BIT(0),
+
+	/**
+	 * @DRM_GPUVM_USERBITS: user defined bits
+	 */
+	DRM_GPUVM_USERBITS = BIT(1),
+};
+
+/**
+ * struct drm_gpuvm - DRM GPU VA Manager
+ *
+ * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using
+ * an rb-tree based interval tree of &drm_gpuva structures. Typically, this
+ * structure is embedded in bigger driver structures.
+ *
+ * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
+ * pages.
+ *
+ * There should be one manager instance per GPU virtual address space.
+ */
+struct drm_gpuvm {
+	/**
+	 * @name: the name of the DRM GPU VA space
+	 */
+	const char *name;
+
+	/**
+	 * @flags: the &drm_gpuvm_flags of this GPUVM
+	 */
+	enum drm_gpuvm_flags flags;
+
+	/**
+	 * @drm: the &drm_device this VM lives in
+	 */
+	struct drm_device *drm;
+
+	/**
+	 * @mm_start: start of the VA space
+	 */
+	u64 mm_start;
+
+	/**
+	 * @mm_range: length of the VA space
+	 */
+	u64 mm_range;
+
+	/**
+	 * @rb: structures to track &drm_gpuva entries
+	 */
+	struct {
+		/**
+		 * @rb.tree: the rb-tree to track GPU VA mappings
+		 */
+		struct rb_root_cached tree;
+
+		/**
+		 * @rb.list: the &list_head to track GPU VA mappings
+		 */
+		struct list_head list;
+	} rb;
+
+	/**
+	 * @kref: reference count of this object
+	 */
+	struct kref kref;
+
+	/**
+	 * @kernel_alloc_node:
+	 *
+	 * &drm_gpuva representing the address space cutout reserved for
+	 * the kernel
+	 */
+	struct drm_gpuva kernel_alloc_node;
+
+	/**
+	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
+	 */
+	const struct drm_gpuvm_ops *ops;
+
+	/**
+	 * @r_obj: Resv GEM object representing the GPUVM's common &dma_resv.
+	 */
+	struct drm_gem_object *r_obj;
+
+	/**
+	 * @extobj: structure holding the extobj list
+	 */
+	struct {
+		/**
+		 * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
+		 * external objects
+		 */
+		struct list_head list;
+
+		/**
+		 * @extobj.local_list: pointer to the local list temporarily
+		 * storing entries from the external object list
+		 */
+		struct list_head *local_list;
+
+		/**
+		 * @extobj.lock: spinlock to protect the extobj list
+		 */
+		spinlock_t lock;
+	} extobj;
+
+	/**
+	 * @evict: structure holding the evict list and evict list lock
+	 */
+	struct {
+		/**
+		 * @evict.list: &list_head storing &drm_gpuvm_bos currently
+		 * being evicted
+		 */
+		struct list_head list;
+
+		/**
+		 * @evict.local_list: pointer to the local list temporarily
+		 * storing entries from the evicted object list
+		 */
+		struct list_head *local_list;
+
+		/**
+		 * @evict.lock: spinlock to protect the evict list
+		 */
+		spinlock_t lock;
+	} evict;
+};
+
+void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+		    enum drm_gpuvm_flags flags,
+		    struct drm_device *drm,
+		    struct drm_gem_object *r_obj,
+		    u64 start_offset, u64 range,
+		    u64 reserve_offset, u64 reserve_range,
+		    const struct drm_gpuvm_ops *ops);
+
+/**
+ * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to acquire the reference of
+ *
+ * This function acquires an additional reference to @gpuvm. It is illegal to
+ * call this without already holding a reference. No locks required.
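A sketch of how a driver might instantiate such a VM, assuming a hypothetical struct my_vm wrapper and a hypothetical my_gpuvm_ops table (sketched at the end of this section). Following the pattern of existing GPUVM users, the resv-object reference is dropped right after init, on the assumption that the GPUVM keeps its own:

struct my_vm {
	struct drm_gpuvm base;	/* typically embedded */
};

static int my_vm_init(struct my_vm *vm, struct drm_device *drm,
		      u64 start, u64 range)
{
	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);

	if (!r_obj)
		return -ENOMEM;

	/* No kernel-reserved cutout in this sketch (reserve range is empty). */
	drm_gpuvm_init(&vm->base, "my-vm", 0, drm, r_obj,
		       start, range, 0, 0, &my_gpuvm_ops);
	drm_gem_object_put(r_obj);	/* the GPUVM holds its own reference */

	return 0;
}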
+ *
+ * Returns: the &struct drm_gpuvm pointer
+ */
+static inline struct drm_gpuvm *
+drm_gpuvm_get(struct drm_gpuvm *gpuvm)
+{
+	kref_get(&gpuvm->kref);
+
+	return gpuvm;
+}
+
+void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
+
+bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+
+struct drm_gem_object *
+drm_gpuvm_resv_object_alloc(struct drm_device *drm);
+
+/**
+ * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
+{
+	return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
+}
+
+/**
+ * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
+ */
+#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)
+
+/**
+ * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
+ * &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
+ * &dma_resv
+ */
+#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)
+
+#define drm_gpuvm_resv_held(gpuvm__) \
+	dma_resv_held(drm_gpuvm_resv(gpuvm__))
+
+#define drm_gpuvm_resv_assert_held(gpuvm__) \
+	dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))
+
+/**
+ * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
+ * external object
+ * @gpuvm: the &drm_gpuvm to check
+ * @obj: the &drm_gem_object to check
+ *
+ * Returns: true if the &drm_gem_object &dma_resv differs from the
+ * &drm_gpuvm's &dma_resv, false otherwise
+ */
+static inline bool
+drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
+		    struct drm_gem_object *obj)
+{
+	return obj && obj->resv != drm_gpuvm_resv(gpuvm);
+}
+
+static inline struct drm_gpuva *
+__drm_gpuva_next(struct drm_gpuva *va)
+{
+	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
+		return list_next_entry(va, rb.entry);
+
+	return NULL;
+}
+
+/**
+ * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @gpuvm__: &drm_gpuvm to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
+ * between @start__ and @end__. It is implemented similarly to list_for_each(),
+ * but is using the &drm_gpuvm's internal interval tree to accelerate
+ * the search for the starting &drm_gpuva, and hence isn't safe against removal
+ * of elements. It assumes that @end__ is within (or is the upper limit of) the
+ * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
+ * @kernel_alloc_node.
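Given the iterator documented above, range queries stay cheap: the interval tree finds the first overlapping mapping and the ordered list continues from there. A small sketch of a hypothetical helper, assuming the caller guarantees the range lies within the VM:

static unsigned int my_count_vas(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
{
	struct drm_gpuva *va;
	unsigned int count = 0;

	drm_gpuvm_for_each_va_range(va, gpuvm, addr, addr + range)
		count++;	/* includes the kernel_alloc_node if it overlaps */

	return count;
}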
+ */ +#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \ + for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \ + va__ && (va__->va.addr < (end__)); \ + va__ = __drm_gpuva_next(va__)) + +/** + * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of + * &drm_gpuvas + * @va__: &drm_gpuva to assign to in each iteration step + * @next__: another &drm_gpuva to use as temporary storage + * @gpuvm__: &drm_gpuvm to walk over + * @start__: starting offset, the first gpuva will overlap this + * @end__: ending offset, the last gpuva will start before this (but may + * overlap) + * + * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie + * between @start__ and @end__. It is implemented similarly to + * list_for_each_safe(), but is using the &drm_gpuvm's internal interval + * tree to accelerate the search for the starting &drm_gpuva, and hence is safe + * against removal of elements. It assumes that @end__ is within (or is the + * upper limit of) the &drm_gpuvm. This iterator does not skip over the + * &drm_gpuvm's @kernel_alloc_node. + */ +#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \ + for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \ + next__ = __drm_gpuva_next(va__); \ + va__ && (va__->va.addr < (end__)); \ + va__ = next__, next__ = __drm_gpuva_next(va__)) + +/** + * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas + * @va__: &drm_gpuva to assign to in each iteration step + * @gpuvm__: &drm_gpuvm to walk over + * + * This iterator walks over all &drm_gpuva structures associated with the given + * &drm_gpuvm. + */ +#define drm_gpuvm_for_each_va(va__, gpuvm__) \ + list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry) + +/** + * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas + * @va__: &drm_gpuva to assign to in each iteration step + * @next__: another &drm_gpuva to use as temporary storage + * @gpuvm__: &drm_gpuvm to walk over + * + * This iterator walks over all &drm_gpuva structures associated with the given + * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and + * hence safe against the removal of elements. + */ +#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \ + list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry) + +/** + * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec + * + * This structure should be created on the stack as &drm_exec should be. + * + * Optionally, @extra can be set in order to lock additional &drm_gem_objects. + */ +struct drm_gpuvm_exec { + /** + * @exec: the &drm_exec structure + */ + struct drm_exec exec; + + /** + * @flags: the flags for the struct drm_exec + */ + uint32_t flags; + + /** + * @vm: the &drm_gpuvm to lock its DMA reservations + */ + struct drm_gpuvm *vm; + + /** + * @num_fences: the number of fences to reserve for the &dma_resv of the + * locked &drm_gem_objects + */ + unsigned int num_fences; + + /** + * @extra: Callback and corresponding private data for the driver to + * lock arbitrary additional &drm_gem_objects. + */ + struct { + /** + * @extra.fn: The driver callback to lock additional + * &drm_gem_objects. 
+		 */
+		int (*fn)(struct drm_gpuvm_exec *vm_exec);
+
+		/**
+		 * @extra.priv: driver private data for the @fn callback
+		 */
+		void *priv;
+	} extra;
+};
+
+int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+			 struct drm_exec *exec,
+			 unsigned int num_fences);
+
+int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+			      struct drm_exec *exec,
+			      unsigned int num_fences);
+
+int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
+			    struct drm_exec *exec,
+			    u64 addr, u64 range,
+			    unsigned int num_fences);
+
+int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);
+
+int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
+			      struct drm_gem_object **objs,
+			      unsigned int num_objs);
+
+int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
+			      u64 addr, u64 range);
+
+/**
+ * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * Releases all dma-resv locks of all &drm_gem_objects previously acquired
+ * through drm_gpuvm_exec_lock() or its variants.
+ */
+static inline void
+drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
+{
+	drm_exec_fini(&vm_exec->exec);
+}
+
+int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
+void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
+			      struct drm_exec *exec,
+			      struct dma_fence *fence,
+			      enum dma_resv_usage private_usage,
+			      enum dma_resv_usage extobj_usage);
+
+/**
+ * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @fence: fence to add
+ * @private_usage: private dma-resv usage
+ * @extobj_usage: extobj dma-resv usage
+ *
+ * See drm_gpuvm_resv_add_fence().
+ */
+static inline void
+drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
+			      struct dma_fence *fence,
+			      enum dma_resv_usage private_usage,
+			      enum dma_resv_usage extobj_usage)
+{
+	drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
+				 private_usage, extobj_usage);
+}
+
+/**
+ * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * See drm_gpuvm_validate().
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static inline int
+drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
+{
+	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+}
+
+/**
+ * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
+ * &drm_gem_object combination
+ *
+ * This structure is an abstraction representing a &drm_gpuvm and
+ * &drm_gem_object combination. It serves as an indirection to accelerate
+ * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
+ * &drm_gem_object.
+ *
+ * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
+ * accelerate validation.
+ *
+ * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
+ * a GEM object is mapped first in a GPU-VM and release the instance once the
+ * last mapping of the GEM object in this GPU-VM is unmapped.
+ */
+struct drm_gpuvm_bo {
+	/**
+	 * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
+	 * counted pointer.
+	 */
+	struct drm_gpuvm *vm;
+
+	/**
+	 * @obj: The &drm_gem_object being mapped in @vm. This is a reference
+	 * counted pointer.
+	 */
+	struct drm_gem_object *obj;
+
+	/**
+	 * @evicted: Indicates whether the &drm_gem_object is evicted; field
+	 * protected by the &drm_gem_object's dma-resv lock.
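Taken together, the drm_gpuvm_exec helpers above support the usual submit path: lock the VM resv plus all external objects, revalidate evicted BOs, then attach the job's fence everywhere. A sketch, assuming a hypothetical my_submit_prepare() and treating the dma-resv usage values as a driver policy choice:

static int my_submit_prepare(struct drm_gpuvm *gpuvm, struct dma_fence *fence)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
	};
	int ret;

	ret = drm_gpuvm_exec_lock(&vm_exec);	/* VM resv + all extobj resvs */
	if (ret)
		return ret;

	ret = drm_gpuvm_exec_validate(&vm_exec);	/* revalidate evicted BOs */
	if (ret)
		goto out_unlock;

	/* ... push the job here, then publish its fence ... */
	drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
				      DMA_RESV_USAGE_BOOKKEEP,
				      DMA_RESV_USAGE_BOOKKEEP);

out_unlock:
	drm_gpuvm_exec_unlock(&vm_exec);
	return ret;
}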
+	 */
+	bool evicted;
+
+	/**
+	 * @kref: The reference count for this &drm_gpuvm_bo.
+	 */
+	struct kref kref;
+
+	/**
+	 * @list: Structure containing all &list_heads.
+	 */
+	struct {
+		/**
+		 * @list.gpuva: The list of linked &drm_gpuvas.
+		 *
+		 * It is safe to access entries from this list as long as the
+		 * GEM's gpuva lock is held. See also struct drm_gem_object.
+		 */
+		struct list_head gpuva;
+
+		/**
+		 * @list.entry: Structure containing all &list_heads serving as
+		 * entry.
+		 */
+		struct {
+			/**
+			 * @list.entry.gem: List entry to attach to the
+			 * &drm_gem_objects gpuva list.
+			 */
+			struct list_head gem;
+
+			/**
+			 * @list.entry.extobj: List entry to attach to the
+			 * &drm_gpuvm's extobj list.
+			 */
+			struct list_head extobj;
+
+			/**
+			 * @list.entry.evict: List entry to attach to the
+			 * &drm_gpuvm's evict list.
+			 */
+			struct list_head evict;
+		} entry;
+	} list;
+};
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
+		    struct drm_gem_object *obj);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
+		    struct drm_gem_object *obj);
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
+ * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
+ *
+ * This function acquires an additional reference to @vm_bo. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm_bo pointer
+ */
+static inline struct drm_gpuvm_bo *
+drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+	kref_get(&vm_bo->kref);
+	return vm_bo;
+}
+
+bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+		  struct drm_gem_object *obj);
+
+void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);
+
+/**
+ * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
+ * to/from the &drm_gpuvm's evicted list
+ * @obj: the &drm_gem_object
+ * @evict: indicates whether @obj is evicted
+ *
+ * See drm_gpuvm_bo_evict().
+ */
+static inline void
+drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
+{
+	struct drm_gpuvm_bo *vm_bo;
+
+	drm_gem_gpuva_assert_lock_held(obj);
+	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+		drm_gpuvm_bo_evict(vm_bo, evict);
+}
+
+void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
+	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
+ * &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: &next &drm_gpuva to store the next step
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ *
+ * The caller must hold the GEM's gpuva lock.
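The lifetime rule quoted above (create the &drm_gpuvm_bo with the first mapping of a GEM object, release it with the last) commonly reduces to obtain/link/put. A sketch, assuming a hypothetical my_map_bo() and assuming that drm_gpuva_link() takes its own &drm_gpuvm_bo reference so the obtain reference can be dropped immediately:

static int my_map_bo(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
		     struct drm_gpuva *va)
{
	struct drm_gpuvm_bo *vm_bo;

	/* Returns the existing instance for (gpuvm, obj) or creates a new one. */
	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	/* Caller must hold the GEM's gpuva lock (the dma-resv by default). */
	drm_gpuva_link(va, vm_bo);
	drm_gpuvm_bo_put(vm_bo);	/* drop the obtain reference */

	return 0;
}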
+ */
+#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
+	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * enum drm_gpuva_op_type - GPU VA operation type
+ *
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
+ */
+enum drm_gpuva_op_type {
+	/**
+	 * @DRM_GPUVA_OP_MAP: the map op type
+	 */
+	DRM_GPUVA_OP_MAP,
+
+	/**
+	 * @DRM_GPUVA_OP_REMAP: the remap op type
+	 */
+	DRM_GPUVA_OP_REMAP,
+
+	/**
+	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
+	 */
+	DRM_GPUVA_OP_UNMAP,
+
+	/**
+	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
+	 */
+	DRM_GPUVA_OP_PREFETCH,
+};
+
+/**
+ * struct drm_gpuva_op_map - GPU VA map operation
+ *
+ * This structure represents a single map operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_map {
+	/**
+	 * @va: structure containing address and range of a map
+	 * operation
+	 */
+	struct {
+		/**
+		 * @va.addr: the base address of the new mapping
+		 */
+		u64 addr;
+
+		/**
+		 * @va.range: the range of the new mapping
+		 */
+		u64 range;
+	} va;
+
+	/**
+	 * @gem: structure containing the &drm_gem_object and its offset
+	 */
+	struct {
+		/**
+		 * @gem.offset: the offset within the &drm_gem_object
+		 */
+		u64 offset;
+
+		/**
+		 * @gem.obj: the &drm_gem_object to map
+		 */
+		struct drm_gem_object *obj;
+	} gem;
+};
+
+/**
+ * struct drm_gpuva_op_unmap - GPU VA unmap operation
+ *
+ * This structure represents a single unmap operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_unmap {
+	/**
+	 * @va: the &drm_gpuva to unmap
+	 */
+	struct drm_gpuva *va;
+
+	/**
+	 * @keep:
+	 *
+	 * Indicates whether this &drm_gpuva is physically contiguous with the
+	 * original mapping request.
+	 *
+	 * Optionally, if &keep is set, drivers may keep the actual page table
+	 * mappings for this &drm_gpuva, adding the missing page table entries
+	 * only and update the &drm_gpuvm accordingly.
+	 */
+	bool keep;
+};
+
+/**
+ * struct drm_gpuva_op_remap - GPU VA remap operation
+ *
+ * This represents a single remap operation generated by the DRM GPU VA manager.
+ *
+ * A remap operation is generated when an existing GPU VA mapping is split up
+ * by inserting a new GPU VA mapping or by partially unmapping existing
+ * mapping(s), hence it consists of a maximum of two map and one unmap
+ * operation.
+ *
+ * The @unmap operation takes care of removing the original existing mapping.
+ * @prev is used to remap the preceding part, @next the subsequent part.
+ *
+ * If either a new mapping's start address is aligned with the start address
+ * of the old mapping or the new mapping's end address is aligned with the
+ * end address of the old mapping, either @prev or @next is NULL.
+ *
+ * Note, the reason for a dedicated remap operation, rather than arbitrary
+ * unmap and map operations, is to give drivers the chance of extracting driver
+ * specific data for creating the new mappings from the unmap operation's
+ * &drm_gpuva structure which typically is embedded in larger driver specific
+ * structures.
+ */
+struct drm_gpuva_op_remap {
+	/**
+	 * @prev: the preceding part of a split mapping
+	 */
+	struct drm_gpuva_op_map *prev;
+
+	/**
+	 * @next: the subsequent part of a split mapping
+	 */
+	struct drm_gpuva_op_map *next;
+
+	/**
+	 * @unmap: the unmap operation for the original existing mapping
+	 */
+	struct drm_gpuva_op_unmap *unmap;
+};
+
+/**
+ * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
+ *
+ * This structure represents a single prefetch operation generated by the
+ * DRM GPU VA manager.
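To make the remap semantics described above concrete, a worked example (addresses are arbitrary):

/*
 * Existing mapping:           [0x1000, 0x4000)
 * Requested partial unmap:    [0x2000, 0x3000)
 *
 * Resulting drm_gpuva_op_remap:
 *   unmap: the original va covering [0x1000, 0x4000)
 *   prev:  re-maps                  [0x1000, 0x2000)
 *   next:  re-maps                  [0x3000, 0x4000)
 *
 * Unmapping a prefix or suffix instead leaves @prev or @next NULL.
 */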
+ */
+struct drm_gpuva_op_prefetch {
+	/**
+	 * @va: the &drm_gpuva to prefetch
+	 */
+	struct drm_gpuva *va;
+};
+
+/**
+ * struct drm_gpuva_op - GPU VA operation
+ *
+ * This structure represents a single generic operation.
+ *
+ * The particular type of the operation is defined by @op.
+ */
+struct drm_gpuva_op {
+	/**
+	 * @entry:
+	 *
+	 * The &list_head used to distribute instances of this struct within
+	 * &drm_gpuva_ops.
+	 */
+	struct list_head entry;
+
+	/**
+	 * @op: the type of the operation
+	 */
+	enum drm_gpuva_op_type op;
+
+	union {
+		/**
+		 * @map: the map operation
+		 */
+		struct drm_gpuva_op_map map;
+
+		/**
+		 * @remap: the remap operation
+		 */
+		struct drm_gpuva_op_remap remap;
+
+		/**
+		 * @unmap: the unmap operation
+		 */
+		struct drm_gpuva_op_unmap unmap;
+
+		/**
+		 * @prefetch: the prefetch operation
+		 */
+		struct drm_gpuva_op_prefetch prefetch;
+	};
+};
+
+/**
+ * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
+ */
+struct drm_gpuva_ops {
+	/**
+	 * @list: the &list_head
+	 */
+	struct list_head list;
+};
+
+/**
+ * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations.
+ */
+#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @next: another &drm_gpuva_op to store the next step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations. It is
+ * implemented with list_for_each_entry_safe(), so it is safe against removal
+ * of elements.
+ */
+#define drm_gpuva_for_each_op_safe(op, next, ops) \
+	list_for_each_entry_safe(op, next, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations beginning
+ * from the given operation in reverse order.
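+ *
+ * This is useful e.g. to unwind already applied ops after a failure while
+ * processing an ops list. A minimal sketch, assuming my_driver_revert_op()
+ * is a hypothetical driver helper and @op points at the op that failed;
+ * note that the walk begins with the given op itself::
+ *
+ *	drm_gpuva_for_each_op_from_reverse(op, ops)
+ *		my_driver_revert_op(op);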
+ */
+#define drm_gpuva_for_each_op_from_reverse(op, ops) \
+	list_for_each_entry_from_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_reverse() - iterator to walk over &drm_gpuva_ops in reverse
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations in
+ * reverse order.
+ */
+#define drm_gpuva_for_each_op_reverse(op, ops) \
+	list_for_each_entry_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
+ */
+#define drm_gpuva_first_op(ops) \
+	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
+ */
+#define drm_gpuva_last_op(ops) \
+	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)
+
+/**
+ * drm_gpuva_next_op() - next &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+
+struct drm_gpuva_ops *
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+			    u64 addr, u64 range,
+			    struct drm_gem_object *obj, u64 offset);
+struct drm_gpuva_ops *
+drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
+			      u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
+			      u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);
+
+void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
+			struct drm_gpuva_ops *ops);
+
+static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
+					  struct drm_gpuva_op_map *op)
+{
+	drm_gpuva_init(va, op->va.addr, op->va.range,
+		       op->gem.obj, op->gem.offset);
+}
+
+/**
+ * struct drm_gpuvm_ops - callbacks for split/merge steps
+ *
+ * This structure defines the callbacks used by &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
+ * operations to drivers.
+ */
+struct drm_gpuvm_ops {
+	/**
+	 * @vm_free: called when the last reference of a struct drm_gpuvm is
+	 * dropped
+	 *
+	 * This callback is mandatory.
+	 */
+	void (*vm_free)(struct drm_gpuvm *gpuvm);
+
+	/**
+	 * @op_alloc: called when the &drm_gpuvm allocates
+	 * a struct drm_gpuva_op
+	 *
+	 * Some drivers may want to embed struct drm_gpuva_op into driver
+	 * specific structures. By implementing this callback drivers can
+	 * allocate memory accordingly.
+	 *
+	 * This callback is optional.
+	 */
+	struct drm_gpuva_op *(*op_alloc)(void);
+
+	/**
+	 * @op_free: called when the &drm_gpuvm frees a
+	 * struct drm_gpuva_op
+	 *
+	 * Some drivers may want to embed struct drm_gpuva_op into driver
+	 * specific structures. By implementing this callback drivers can
+	 * free the previously allocated memory accordingly.
+	 *
+	 * This callback is optional.
+	 */
+	void (*op_free)(struct drm_gpuva_op *op);
+
+	/**
+	 * @vm_bo_alloc: called when the &drm_gpuvm allocates
+	 * a struct drm_gpuvm_bo
+	 *
+	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
+	 * specific structures. By implementing this callback drivers can
+	 * allocate memory accordingly.
+	 *
+	 * This callback is optional.
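+	 *
+	 * A minimal embedding sketch (struct my_vm_bo and my_vm_bo_alloc()
+	 * are hypothetical driver code)::
+	 *
+	 *	struct my_vm_bo {
+	 *		struct drm_gpuvm_bo base;
+	 *	};
+	 *
+	 *	static struct drm_gpuvm_bo *my_vm_bo_alloc(void)
+	 *	{
+	 *		struct my_vm_bo *vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
+	 *
+	 *		return vm_bo ? &vm_bo->base : NULL;
+	 *	}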
+ */
+	struct drm_gpuvm_bo *(*vm_bo_alloc)(void);
+
+	/**
+	 * @vm_bo_free: called when the &drm_gpuvm frees a
+	 * struct drm_gpuvm_bo
+	 *
+	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
+	 * specific structures. By implementing this callback drivers can
+	 * free the previously allocated memory accordingly.
+	 *
+	 * This callback is optional.
+	 */
+	void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);
+
+	/**
+	 * @vm_bo_validate: called from drm_gpuvm_validate()
+	 *
+	 * Drivers receive this callback for every evicted &drm_gem_object being
+	 * mapped in the corresponding &drm_gpuvm.
+	 *
+	 * Typically, drivers would call their driver specific variant of
+	 * ttm_bo_validate() from within this callback.
+	 */
+	int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
+			      struct drm_exec *exec);
+
+	/**
+	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
+	 * mapping once all previous steps were completed
+	 *
+	 * The &priv pointer matches the one the driver passed to
+	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+	 *
+	 * Can be NULL if &drm_gpuvm_sm_map is not used.
+	 */
+	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
+
+	/**
+	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
+	 * &drm_gpuvm_sm_unmap to split up an existing mapping
+	 *
+	 * This callback is called when an existing mapping needs to be split
+	 * up. This is the case when either a newly requested mapping overlaps
+	 * or is enclosed by an existing mapping, or a partial unmap of an
+	 * existing mapping is requested.
+	 *
+	 * The &priv pointer matches the one the driver passed to
+	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+	 *
+	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
+	 * used.
+	 */
+	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
+
+	/**
+	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
+	 * &drm_gpuvm_sm_unmap to unmap an existing mapping
+	 *
+	 * This callback is called when an existing mapping needs to be
+	 * unmapped. This is the case when either a newly requested mapping
+	 * encloses an existing mapping, or an unmap of an existing mapping is
+	 * requested.
+	 *
+	 * The &priv pointer matches the one the driver passed to
+	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+	 *
+	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
+	 * used.
+	 */
+	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+};
+
+int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
+		     u64 addr, u64 range,
+		     struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
+		       u64 addr, u64 range);
+
+void drm_gpuva_map(struct drm_gpuvm *gpuvm,
+		   struct drm_gpuva *va,
+		   struct drm_gpuva_op_map *op);
+
+void drm_gpuva_remap(struct drm_gpuva *prev,
+		     struct drm_gpuva *next,
+		     struct drm_gpuva_op_remap *op);
+
+void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+
+/**
+ * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
+ * the unmap stage of a remap op.
+ * @op: Remap op.
+ * @start_addr: Output pointer for the start of the required unmap.
+ * @range: Output pointer for the length of the required unmap.
+ *
+ * The given start address and range will be set such that they represent the
+ * range of the address space that was previously covered by the mapping being
+ * re-mapped, but is now empty.
+ */
+static inline void
+drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
+				  u64 *start_addr, u64 *range)
+{
+	const u64 va_start = op->prev ?
+ op->prev->va.addr + op->prev->va.range : + op->unmap->va->va.addr; + const u64 va_end = op->next ? + op->next->va.addr : + op->unmap->va->va.addr + op->unmap->va->va.range; + + if (start_addr) + *start_addr = va_start; + if (range) + *range = va_end - va_start; +} + +#endif /* __DRM_GPUVM_H__ */ diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index 6ed61c371f6c..171760b6c4a1 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -110,17 +110,6 @@ enum drm_ioctl_flags { */ DRM_ROOT_ONLY = BIT(2), /** - * @DRM_UNLOCKED: - * - * Whether &drm_ioctl_desc.func should be called with the DRM BKL held - * or not. Enforced as the default for all modern drivers, hence there - * should never be a need to set this flag. - * - * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the - * only legacy IOCTL which needs this. - */ - DRM_UNLOCKED = BIT(4), - /** * @DRM_RENDER_ALLOW: * * This is used for all ioctl needed for rendering only, for drivers diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index 514c8a7a32f0..ba483c87f0e7 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -3,6 +3,8 @@ #ifndef DRM_KUNIT_HELPERS_H_ #define DRM_KUNIT_HELPERS_H_ +#include <linux/device.h> + #include <kunit/test.h> struct drm_device; @@ -51,7 +53,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, { struct drm_driver *driver; - driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL); + driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, driver); driver->driver_features = features; diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h deleted file mode 100644 index 0fc85418aad8..000000000000 --- a/include/drm/drm_legacy.h +++ /dev/null @@ -1,331 +0,0 @@ -#ifndef __DRM_DRM_LEGACY_H__ -#define __DRM_DRM_LEGACY_H__ -/* - * Legacy driver interfaces for the Direct Rendering Manager - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * Copyright (c) 2009-2010, Code Aurora Forum. - * All rights reserved. - * Copyright © 2014 Intel Corporation - * Daniel Vetter <daniel.vetter@ffwll.ch> - * - * Author: Rickard E. (Rik) Faith <faith@valinux.com> - * Author: Gareth Hughes <gareth@valinux.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include <linux/agp_backend.h> - -#include <drm/drm.h> -#include <drm/drm_auth.h> - -struct drm_device; -struct drm_driver; -struct file; -struct pci_driver; - -/* - * Legacy Support for palateontologic DRM drivers - * - * If you add a new driver and it uses any of these functions or structures, - * you're doing it terribly wrong. - */ - -/* - * Hash-table Support - */ - -struct drm_hash_item { - struct hlist_node head; - unsigned long key; -}; - -struct drm_open_hash { - struct hlist_head *table; - u8 order; -}; - -/** - * DMA buffer. - */ -struct drm_buf { - int idx; /**< Index into master buflist */ - int total; /**< Buffer size */ - int order; /**< log-base-2(total) */ - int used; /**< Amount of buffer in use (for DMA) */ - unsigned long offset; /**< Byte offset (used internally) */ - void *address; /**< Address of buffer */ - unsigned long bus_address; /**< Bus address of buffer */ - struct drm_buf *next; /**< Kernel-only: used for free list */ - __volatile__ int waiting; /**< On kernel DMA queue */ - __volatile__ int pending; /**< On hardware DMA queue */ - struct drm_file *file_priv; /**< Private of holding file descr */ - int context; /**< Kernel queue for this buffer */ - int while_locked; /**< Dispatch this buffer while locked */ - enum { - DRM_LIST_NONE = 0, - DRM_LIST_FREE = 1, - DRM_LIST_WAIT = 2, - DRM_LIST_PEND = 3, - DRM_LIST_PRIO = 4, - DRM_LIST_RECLAIM = 5 - } list; /**< Which list we're on */ - - int dev_priv_size; /**< Size of buffer private storage */ - void *dev_private; /**< Per-buffer private storage */ -}; - -typedef struct drm_dma_handle { - dma_addr_t busaddr; - void *vaddr; - size_t size; -} drm_dma_handle_t; - -/** - * Buffer entry. There is one of this for each buffer size order. - */ -struct drm_buf_entry { - int buf_size; /**< size */ - int buf_count; /**< number of buffers */ - struct drm_buf *buflist; /**< buffer list */ - int seg_count; - int page_order; - struct drm_dma_handle **seglist; - - int low_mark; /**< Low water mark */ - int high_mark; /**< High water mark */ -}; - -/** - * DMA data. - */ -struct drm_device_dma { - - struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ - int buf_count; /**< total number of buffers */ - struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ - int seg_count; - int page_count; /**< number of pages */ - unsigned long *pagelist; /**< page list */ - unsigned long byte_count; - enum { - _DRM_DMA_USE_AGP = 0x01, - _DRM_DMA_USE_SG = 0x02, - _DRM_DMA_USE_FB = 0x04, - _DRM_DMA_USE_PCI_RO = 0x08 - } flags; - -}; - -/** - * Scatter-gather memory. 
- */ -struct drm_sg_mem { - unsigned long handle; - void *virtual; - int pages; - struct page **pagelist; - dma_addr_t *busaddr; -}; - -/** - * Kernel side of a mapping - */ -struct drm_local_map { - dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/ - unsigned long size; /**< Requested physical size (bytes) */ - enum drm_map_type type; /**< Type of memory to map */ - enum drm_map_flags flags; /**< Flags */ - void *handle; /**< User-space: "Handle" to pass to mmap() */ - /**< Kernel-space: kernel-virtual address */ - int mtrr; /**< MTRR slot used */ -}; - -typedef struct drm_local_map drm_local_map_t; - -/** - * Mappings list - */ -struct drm_map_list { - struct list_head head; /**< list head */ - struct drm_hash_item hash; - struct drm_local_map *map; /**< mapping */ - uint64_t user_token; - struct drm_master *master; -}; - -int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, struct drm_local_map **map_p); -struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token); -void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); -int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); -struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); -int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); - -int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); -int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); - -/** - * Test that the hardware lock is held by the caller, returning otherwise. - * - * \param dev DRM device. - * \param filp file pointer of the caller. - */ -#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ -do { \ - if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ - _file_priv->master->lock.file_priv != _file_priv) { \ - DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ - __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ - _file_priv->master->lock.file_priv, _file_priv); \ - return -EINVAL; \ - } \ -} while (0) - -void drm_legacy_idlelock_take(struct drm_lock_data *lock); -void drm_legacy_idlelock_release(struct drm_lock_data *lock); - -/* drm_irq.c */ -int drm_legacy_irq_uninstall(struct drm_device *dev); - -/* drm_pci.c */ - -#ifdef CONFIG_PCI - -int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver); -void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver); - -#else - -static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, - size_t size, size_t align) -{ - return NULL; -} - -static inline void drm_pci_free(struct drm_device *dev, - struct drm_dma_handle *dmah) -{ -} - -static inline int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - return -EINVAL; -} - -static inline void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ -} - -#endif - -/* - * AGP Support - */ - -struct drm_agp_head { - struct agp_kern_info agp_info; - struct list_head memory; - unsigned long mode; - struct agp_bridge_data *bridge; - int enabled; - int acquired; - unsigned long base; - int agp_mtrr; - int cant_use_aperture; - unsigned long page_mask; -}; - -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) -struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev); -int drm_legacy_agp_acquire(struct drm_device *dev); -int drm_legacy_agp_release(struct 
drm_device *dev); -int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info); -int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -#else -static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) -{ - return NULL; -} - -static inline int drm_legacy_agp_acquire(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_release(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_enable(struct drm_device *dev, - struct drm_agp_mode mode) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_info(struct drm_device *dev, - struct drm_agp_info *info) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_alloc(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_free(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_unbind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_bind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} -#endif - -/* drm_memory.c */ -void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); -void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); -void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); - -#endif /* __DRM_DRM_LEGACY_H__ */ diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h index 816f196b3d4c..e8e0f8d39f3a 100644 --- a/include/drm/drm_mipi_dbi.h +++ b/include/drm/drm_mipi_dbi.h @@ -12,6 +12,7 @@ #include <drm/drm_device.h> #include <drm/drm_simple_kms_helper.h> +struct drm_format_conv_state; struct drm_rect; struct gpio_desc; struct iosys_map; @@ -192,7 +193,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, size_t len); int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swap); + struct drm_rect *clip, bool swap, + struct drm_format_conv_state *fmtcnv_state); /** * mipi_dbi_command - MIPI DCS command with optional parameter(s) diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index c9df0407980c..c0aec0d4d664 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -168,6 +168,7 @@ struct mipi_dsi_device_info { * struct mipi_dsi_device - DSI peripheral device * @host: DSI host for this peripheral * @dev: driver model device node for this peripheral + * @attached: the DSI device has been successfully attached * @name: DSI peripheral chip type * @channel: virtual channel assigned to the peripheral * @format: pixel format for video mode @@ -184,6 +185,7 @@ struct mipi_dsi_device_info { struct mipi_dsi_device { struct mipi_dsi_host *host; struct device dev; + bool attached; char name[DSI_DEV_NAME_SIZE]; unsigned int channel; diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h index 912f1e415685..08d7a7f0188f 100644 --- a/include/drm/drm_mode_object.h +++ 
b/include/drm/drm_mode_object.h @@ -60,7 +60,7 @@ struct drm_mode_object { void (*free_cb)(struct kref *kref); }; -#define DRM_OBJECT_MAX_PROPERTY 24 +#define DRM_OBJECT_MAX_PROPERTY 64 /** * struct drm_object_properties - property tracking for &drm_mode_object */ diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index e3c3ac615909..881b03e4dc28 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -134,7 +134,7 @@ struct drm_crtc_helper_funcs { * Since this function is both called from the check phase of an atomic * commit, and the mode validation in the probe paths it is not allowed * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardward constraints. Any further + * against configuration-invariant hardware constraints. Any further * limits which depend upon the configuration can only be checked in * @mode_fixup or @atomic_check. * @@ -550,7 +550,7 @@ struct drm_encoder_helper_funcs { * Since this function is both called from the check phase of an atomic * commit, and the mode validation in the probe paths it is not allowed * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardward constraints. Any further + * against configuration-invariant hardware constraints. Any further * limits which depend upon the configuration can only be checked in * @mode_fixup or @atomic_check. * @@ -1154,6 +1154,11 @@ struct drm_connector_helper_funcs { * This operation is optional. * * This callback is used by the drm_kms_helper_poll_enable() helpers. + * + * This operation does not need to perform any hpd state tracking as + * the DRM core handles that maintenance and ensures the calls to enable + * and disable hpd are balanced. + * */ void (*enable_hpd)(struct drm_connector *connector); @@ -1165,6 +1170,11 @@ struct drm_connector_helper_funcs { * This operation is optional. * * This callback is used by the drm_kms_helper_poll_disable() helpers. + * + * This operation does not need to perform any hpd state tracking as + * the DRM core handles that maintenance and ensures the calls to enable + * and disable hpd are balanced. + * */ void (*disable_hpd)(struct drm_connector *connector); }; @@ -1464,7 +1474,7 @@ struct drm_mode_config_helper_funcs { * swapped into the various state pointers. The passed in state * therefore contains copies of the old/previous state. This hook should * commit the new state into hardware. Note that the helpers have - * already waited for preceeding atomic commits and fences, but drivers + * already waited for preceding atomic commits and fences, but drivers * can add more waiting calls at the start of their implementation, e.g. * to wait for driver-internal request for implicit syncing, before * starting to commit the update to the hardware. 
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 79d62856defb..641fe298052d 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -116,6 +116,10 @@ struct drm_plane_state { /** @src_h: height of visible portion of plane (in 16.16) */ uint32_t src_h, src_w; + /** @hotspot_x: x offset to mouse cursor hotspot */ + /** @hotspot_y: y offset to mouse cursor hotspot */ + int32_t hotspot_x, hotspot_y; + /** * @alpha: * Opacity of the plane with 0 as completely transparent and 0xffff as @@ -191,6 +195,16 @@ struct drm_plane_state { struct drm_property_blob *fb_damage_clips; /** + * @ignore_damage_clips: + * + * Set by drivers to indicate the drm_atomic_helper_damage_iter_init() + * helper that the @fb_damage_clips blob property should be ignored. + * + * See :ref:`damage_tracking_properties` for more information. + */ + bool ignore_damage_clips; + + /** * @src: * * source coordinates of the plane (in 16.16). @@ -237,6 +251,13 @@ struct drm_plane_state { /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; + + /** + * @color_mgmt_changed: Color management properties have changed. Used + * by the atomic helpers and drivers to steer the atomic commit control + * flow. + */ + bool color_mgmt_changed : 1; }; static inline struct drm_rect @@ -748,6 +769,16 @@ struct drm_plane { * scaling. */ struct drm_property *scaling_filter_property; + + /** + * @hotspot_x_property: property to set mouse hotspot x offset. + */ + struct drm_property *hotspot_x_property; + + /** + * @hotspot_y_property: property to set mouse hotspot y offset. + */ + struct drm_property *hotspot_y_property; }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 3a574e8cd22f..75f9c4830564 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -26,7 +26,6 @@ #include <linux/types.h> -struct drm_atomic_state; struct drm_crtc; struct drm_framebuffer; struct drm_modeset_acquire_ctx; @@ -42,7 +41,6 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr int drm_plane_helper_disable_primary(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx); void drm_plane_helper_destroy(struct drm_plane *plane); -int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state); /** * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index a7abf9f3e697..2a1d01e5b56b 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -60,12 +60,19 @@ enum dma_data_direction; struct drm_device; struct drm_gem_object; +struct drm_file; /* core prime functions */ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, struct dma_buf_export_info *exp_info); void drm_gem_dmabuf_release(struct dma_buf *dma_buf); +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle); +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, uint32_t flags, + int *prime_fd); + /* helper functions for exporting */ int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach); diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index a93a387f8a1a..dd4883df876a 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -453,7 +453,7 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct 
device *dev, /* Helper for struct drm_device based logging. */ #define __drm_printk(drm, level, type, fmt, ...) \ - dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__) + dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__) #define drm_info(drm, fmt, ...) \ diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 65bc9710a470..082f29156b3e 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -279,6 +279,12 @@ struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, const void *data); struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev, uint32_t id); +int drm_property_replace_blob_from_id(struct drm_device *dev, + struct drm_property_blob **blob, + uint64_t blob_id, + ssize_t expected_size, + ssize_t expected_elem_size, + bool *replaced); int drm_property_replace_global_blob(struct drm_device *dev, struct drm_property_blob **replace, size_t length, diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index f9544d9b670d..5acc64954a88 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -63,13 +63,12 @@ struct drm_file; * to an array, and as such should start at 0. */ enum drm_sched_priority { - DRM_SCHED_PRIORITY_MIN, - DRM_SCHED_PRIORITY_NORMAL, - DRM_SCHED_PRIORITY_HIGH, DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_HIGH, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_LOW, - DRM_SCHED_PRIORITY_COUNT, - DRM_SCHED_PRIORITY_UNSET = -2 + DRM_SCHED_PRIORITY_COUNT }; /* Used to chose between FIFO and RR jobs scheduling */ @@ -321,6 +320,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); * @sched: the scheduler instance on which this job is scheduled. * @s_fence: contains the fences for the scheduling of job. * @finish_cb: the callback for the finished fence. + * @credits: the number of credits this job contributes to the scheduler * @work: Helper to reschdeule job kill to different context. * @id: a unique id assigned to each job scheduled on the scheduler. * @karma: increment on every hang caused by this job. If this exceeds the hang @@ -340,6 +340,8 @@ struct drm_sched_job { struct drm_gpu_scheduler *sched; struct drm_sched_fence *s_fence; + u32 credits; + /* * work is used only after finish_cb has been used and will not be * accessed anymore. @@ -463,27 +465,42 @@ struct drm_sched_backend_ops { * and it's time to clean it up. */ void (*free_job)(struct drm_sched_job *sched_job); + + /** + * @update_job_credits: Called when the scheduler is considering this + * job for execution. + * + * This callback returns the number of credits the job would take if + * pushed to the hardware. Drivers may use this to dynamically update + * the job's credit count. For instance, deduct the number of credits + * for already signalled native fences. + * + * This callback is optional. + */ + u32 (*update_job_credits)(struct drm_sched_job *sched_job); }; /** * struct drm_gpu_scheduler - scheduler instance-specific data * * @ops: backend operations provided by the driver. - * @hw_submission_limit: the max size of the hardware queue. + * @credit_limit: the credit limit of this scheduler + * @credit_count: the current credit count of this scheduler * @timeout: the time after which a job is removed from the scheduler. * @name: name of the ring for which this scheduler is being used. - * @sched_rq: priority wise array of run queues. - * @wake_up_worker: the wait queue on which the scheduler sleeps until a job - * is ready to be scheduled. 
+ * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT, + * as there's usually one run-queue per priority, but could be less. + * @sched_rq: An allocated array of run-queues of size @num_rqs; * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler * waits on this wait queue until all the scheduled jobs are * finished. - * @hw_rq_count: the number of jobs currently in the hardware queue. * @job_id_count: used to assign unique id to the each job. + * @submit_wq: workqueue used to queue @work_run_job and @work_free_job * @timeout_wq: workqueue used to queue @work_tdr + * @work_run_job: work which calls run_job op of each scheduler. + * @work_free_job: work which calls free_job op of each scheduler. * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the * timeout interval is over. - * @thread: the kthread on which the scheduler which run. * @pending_list: the list of jobs which are currently in the job queue. * @job_list_lock: lock to protect the pending_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked @@ -492,23 +509,27 @@ struct drm_sched_backend_ops { * @_score: score used when the driver doesn't provide one * @ready: marks if the underlying HW is ready to work * @free_guilty: A hit to time out handler to free the guilty job. + * @pause_submit: pause queuing of @work_run_job on @submit_wq + * @own_submit_wq: scheduler owns allocation of @submit_wq * @dev: system &struct device * * One scheduler is implemented for each hardware ring. */ struct drm_gpu_scheduler { const struct drm_sched_backend_ops *ops; - uint32_t hw_submission_limit; + u32 credit_limit; + atomic_t credit_count; long timeout; const char *name; - struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT]; - wait_queue_head_t wake_up_worker; + u32 num_rqs; + struct drm_sched_rq **sched_rq; wait_queue_head_t job_scheduled; - atomic_t hw_rq_count; atomic64_t job_id_count; + struct workqueue_struct *submit_wq; struct workqueue_struct *timeout_wq; + struct work_struct work_run_job; + struct work_struct work_free_job; struct delayed_work work_tdr; - struct task_struct *thread; struct list_head pending_list; spinlock_t job_list_lock; int hang_limit; @@ -516,19 +537,22 @@ struct drm_gpu_scheduler { atomic_t _score; bool ready; bool free_guilty; + bool pause_submit; + bool own_submit_wq; struct device *dev; }; int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, - uint32_t hw_submission, unsigned hang_limit, + struct workqueue_struct *submit_wq, + u32 num_rqs, u32 credit_limit, unsigned int hang_limit, long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name, struct device *dev); void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, - void *owner); + u32 credits, void *owner); void drm_sched_job_arm(struct drm_sched_job *job); int drm_sched_job_add_dependency(struct drm_sched_job *job, struct dma_fence *fence); @@ -548,8 +572,12 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, struct drm_gpu_scheduler **sched_list, unsigned int num_sched_list); +void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched); void drm_sched_job_cleanup(struct drm_sched_job *job); -void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched); +void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity); +bool drm_sched_wqueue_ready(struct drm_gpu_scheduler 
*sched); +void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched); +void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched); void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery); void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index e1e10dfbb661..fcf1849aa47c 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -689,14 +689,18 @@ #define INTEL_RPLU_IDS(info) \ INTEL_VGA_DEVICE(0xA721, info), \ INTEL_VGA_DEVICE(0xA7A1, info), \ - INTEL_VGA_DEVICE(0xA7A9, info) + INTEL_VGA_DEVICE(0xA7A9, info), \ + INTEL_VGA_DEVICE(0xA7AC, info), \ + INTEL_VGA_DEVICE(0xA7AD, info) /* RPL-P */ #define INTEL_RPLP_IDS(info) \ INTEL_RPLU_IDS(info), \ INTEL_VGA_DEVICE(0xA720, info), \ INTEL_VGA_DEVICE(0xA7A0, info), \ - INTEL_VGA_DEVICE(0xA7A8, info) + INTEL_VGA_DEVICE(0xA7A8, info), \ + INTEL_VGA_DEVICE(0xA7AA, info), \ + INTEL_VGA_DEVICE(0xA7AB, info) /* DG2 */ #define INTEL_DG2_G10_IDS(info) \ @@ -714,7 +718,11 @@ INTEL_VGA_DEVICE(0x56A5, info), \ INTEL_VGA_DEVICE(0x56A6, info), \ INTEL_VGA_DEVICE(0x56B0, info), \ - INTEL_VGA_DEVICE(0x56B1, info) + INTEL_VGA_DEVICE(0x56B1, info), \ + INTEL_VGA_DEVICE(0x56BA, info), \ + INTEL_VGA_DEVICE(0x56BB, info), \ + INTEL_VGA_DEVICE(0x56BC, info), \ + INTEL_VGA_DEVICE(0x56BD, info) #define INTEL_DG2_G12_IDS(info) \ INTEL_VGA_DEVICE(0x5696, info), \ @@ -730,7 +738,8 @@ INTEL_DG2_G12_IDS(info) #define INTEL_ATS_M150_IDS(info) \ - INTEL_VGA_DEVICE(0x56C0, info) + INTEL_VGA_DEVICE(0x56C0, info), \ + INTEL_VGA_DEVICE(0x56C2, info) #define INTEL_ATS_M75_IDS(info) \ INTEL_VGA_DEVICE(0x56C1, info) @@ -738,18 +747,14 @@ #define INTEL_ATS_M_IDS(info) \ INTEL_ATS_M150_IDS(info), \ INTEL_ATS_M75_IDS(info) + /* MTL */ -#define INTEL_MTL_M_IDS(info) \ +#define INTEL_MTL_IDS(info) \ INTEL_VGA_DEVICE(0x7D40, info), \ - INTEL_VGA_DEVICE(0x7D60, info) - -#define INTEL_MTL_P_IDS(info) \ INTEL_VGA_DEVICE(0x7D45, info), \ INTEL_VGA_DEVICE(0x7D55, info), \ + INTEL_VGA_DEVICE(0x7D60, info), \ + INTEL_VGA_DEVICE(0x7D67, info), \ INTEL_VGA_DEVICE(0x7DD5, info) -#define INTEL_MTL_IDS(info) \ - INTEL_MTL_M_IDS(info), \ - INTEL_MTL_P_IDS(info) - #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h index a702b6ec17f7..7d96985f2d05 100644 --- a/include/drm/i915_pxp_tee_interface.h +++ b/include/drm/i915_pxp_tee_interface.h @@ -22,8 +22,10 @@ struct i915_pxp_component_ops { */ struct module *owner; - int (*send)(struct device *dev, const void *message, size_t size); - int (*recv)(struct device *dev, void *buffer, size_t size); + int (*send)(struct device *dev, const void *message, size_t size, + unsigned long timeout_ms); + int (*recv)(struct device *dev, void *buffer, size_t size, + unsigned long timeout_ms); ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id, struct scatterlist *sg_in, size_t total_in_len, struct scatterlist *sg_out); diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h index 30a347e5aa11..4490d43c63e3 100644 --- a/include/drm/ttm/ttm_pool.h +++ b/include/drm/ttm/ttm_pool.h @@ -74,7 +74,7 @@ struct ttm_pool { bool use_dma32; struct { - struct ttm_pool_type orders[MAX_ORDER + 1]; + struct ttm_pool_type orders[NR_PAGE_ORDERS]; } caching[TTM_NUM_CACHING_TYPES]; }; diff --git a/include/drm/xe_pciids.h b/include/drm/xe_pciids.h new file mode 100644 index 000000000000..de1a344737bc 
--- /dev/null +++ b/include/drm/xe_pciids.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _XE_PCIIDS_H_ +#define _XE_PCIIDS_H_ + +/* + * Lists below can be turned into initializers for a struct pci_device_id + * by defining INTEL_VGA_DEVICE: + * + * #define INTEL_VGA_DEVICE(id, info) { \ + * 0x8086, id, \ + * ~0, ~0, \ + * 0x030000, 0xff0000, \ + * (unsigned long) info } + * + * And then calling like: + * + * XE_TGL_12_GT1_IDS(INTEL_VGA_DEVICE, ## __VA_ARGS__) + * + * To turn them into something else, just provide a different macro passed as + * first argument. + */ + +/* TGL */ +#define XE_TGL_GT1_IDS(MACRO__, ...) \ + MACRO__(0x9A60, ## __VA_ARGS__), \ + MACRO__(0x9A68, ## __VA_ARGS__), \ + MACRO__(0x9A70, ## __VA_ARGS__) + +#define XE_TGL_GT2_IDS(MACRO__, ...) \ + MACRO__(0x9A40, ## __VA_ARGS__), \ + MACRO__(0x9A49, ## __VA_ARGS__), \ + MACRO__(0x9A59, ## __VA_ARGS__), \ + MACRO__(0x9A78, ## __VA_ARGS__), \ + MACRO__(0x9AC0, ## __VA_ARGS__), \ + MACRO__(0x9AC9, ## __VA_ARGS__), \ + MACRO__(0x9AD9, ## __VA_ARGS__), \ + MACRO__(0x9AF8, ## __VA_ARGS__) + +#define XE_TGL_IDS(MACRO__, ...) \ + XE_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__),\ + XE_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__) + +/* RKL */ +#define XE_RKL_IDS(MACRO__, ...) \ + MACRO__(0x4C80, ## __VA_ARGS__), \ + MACRO__(0x4C8A, ## __VA_ARGS__), \ + MACRO__(0x4C8B, ## __VA_ARGS__), \ + MACRO__(0x4C8C, ## __VA_ARGS__), \ + MACRO__(0x4C90, ## __VA_ARGS__), \ + MACRO__(0x4C9A, ## __VA_ARGS__) + +/* DG1 */ +#define XE_DG1_IDS(MACRO__, ...) \ + MACRO__(0x4905, ## __VA_ARGS__), \ + MACRO__(0x4906, ## __VA_ARGS__), \ + MACRO__(0x4907, ## __VA_ARGS__), \ + MACRO__(0x4908, ## __VA_ARGS__), \ + MACRO__(0x4909, ## __VA_ARGS__) + +/* ADL-S */ +#define XE_ADLS_IDS(MACRO__, ...) \ + MACRO__(0x4680, ## __VA_ARGS__), \ + MACRO__(0x4682, ## __VA_ARGS__), \ + MACRO__(0x4688, ## __VA_ARGS__), \ + MACRO__(0x468A, ## __VA_ARGS__), \ + MACRO__(0x468B, ## __VA_ARGS__), \ + MACRO__(0x4690, ## __VA_ARGS__), \ + MACRO__(0x4692, ## __VA_ARGS__), \ + MACRO__(0x4693, ## __VA_ARGS__) + +/* ADL-P */ +#define XE_ADLP_IDS(MACRO__, ...) \ + MACRO__(0x46A0, ## __VA_ARGS__), \ + MACRO__(0x46A1, ## __VA_ARGS__), \ + MACRO__(0x46A2, ## __VA_ARGS__), \ + MACRO__(0x46A3, ## __VA_ARGS__), \ + MACRO__(0x46A6, ## __VA_ARGS__), \ + MACRO__(0x46A8, ## __VA_ARGS__), \ + MACRO__(0x46AA, ## __VA_ARGS__), \ + MACRO__(0x462A, ## __VA_ARGS__), \ + MACRO__(0x4626, ## __VA_ARGS__), \ + MACRO__(0x4628, ## __VA_ARGS__), \ + MACRO__(0x46B0, ## __VA_ARGS__), \ + MACRO__(0x46B1, ## __VA_ARGS__), \ + MACRO__(0x46B2, ## __VA_ARGS__), \ + MACRO__(0x46B3, ## __VA_ARGS__), \ + MACRO__(0x46C0, ## __VA_ARGS__), \ + MACRO__(0x46C1, ## __VA_ARGS__), \ + MACRO__(0x46C2, ## __VA_ARGS__), \ + MACRO__(0x46C3, ## __VA_ARGS__) + +/* ADL-N */ +#define XE_ADLN_IDS(MACRO__, ...) \ + MACRO__(0x46D0, ## __VA_ARGS__), \ + MACRO__(0x46D1, ## __VA_ARGS__), \ + MACRO__(0x46D2, ## __VA_ARGS__) + +/* RPL-S */ +#define XE_RPLS_IDS(MACRO__, ...) \ + MACRO__(0xA780, ## __VA_ARGS__), \ + MACRO__(0xA781, ## __VA_ARGS__), \ + MACRO__(0xA782, ## __VA_ARGS__), \ + MACRO__(0xA783, ## __VA_ARGS__), \ + MACRO__(0xA788, ## __VA_ARGS__), \ + MACRO__(0xA789, ## __VA_ARGS__), \ + MACRO__(0xA78A, ## __VA_ARGS__), \ + MACRO__(0xA78B, ## __VA_ARGS__) + +/* RPL-U */ +#define XE_RPLU_IDS(MACRO__, ...) 
\ + MACRO__(0xA721, ## __VA_ARGS__), \ + MACRO__(0xA7A1, ## __VA_ARGS__), \ + MACRO__(0xA7A9, ## __VA_ARGS__), \ + MACRO__(0xA7AC, ## __VA_ARGS__), \ + MACRO__(0xA7AD, ## __VA_ARGS__) + +/* RPL-P */ +#define XE_RPLP_IDS(MACRO__, ...) \ + XE_RPLU_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0xA720, ## __VA_ARGS__), \ + MACRO__(0xA7A0, ## __VA_ARGS__), \ + MACRO__(0xA7A8, ## __VA_ARGS__), \ + MACRO__(0xA7AA, ## __VA_ARGS__), \ + MACRO__(0xA7AB, ## __VA_ARGS__) + +/* DG2 */ +#define XE_DG2_G10_IDS(MACRO__, ...) \ + MACRO__(0x5690, ## __VA_ARGS__), \ + MACRO__(0x5691, ## __VA_ARGS__), \ + MACRO__(0x5692, ## __VA_ARGS__), \ + MACRO__(0x56A0, ## __VA_ARGS__), \ + MACRO__(0x56A1, ## __VA_ARGS__), \ + MACRO__(0x56A2, ## __VA_ARGS__) + +#define XE_DG2_G11_IDS(MACRO__, ...) \ + MACRO__(0x5693, ## __VA_ARGS__), \ + MACRO__(0x5694, ## __VA_ARGS__), \ + MACRO__(0x5695, ## __VA_ARGS__), \ + MACRO__(0x56A5, ## __VA_ARGS__), \ + MACRO__(0x56A6, ## __VA_ARGS__), \ + MACRO__(0x56B0, ## __VA_ARGS__), \ + MACRO__(0x56B1, ## __VA_ARGS__), \ + MACRO__(0x56BA, ## __VA_ARGS__), \ + MACRO__(0x56BB, ## __VA_ARGS__), \ + MACRO__(0x56BC, ## __VA_ARGS__), \ + MACRO__(0x56BD, ## __VA_ARGS__) + +#define XE_DG2_G12_IDS(MACRO__, ...) \ + MACRO__(0x5696, ## __VA_ARGS__), \ + MACRO__(0x5697, ## __VA_ARGS__), \ + MACRO__(0x56A3, ## __VA_ARGS__), \ + MACRO__(0x56A4, ## __VA_ARGS__), \ + MACRO__(0x56B2, ## __VA_ARGS__), \ + MACRO__(0x56B3, ## __VA_ARGS__) + +#define XE_DG2_IDS(MACRO__, ...) \ + XE_DG2_G10_IDS(MACRO__, ## __VA_ARGS__),\ + XE_DG2_G11_IDS(MACRO__, ## __VA_ARGS__),\ + XE_DG2_G12_IDS(MACRO__, ## __VA_ARGS__) + +#define XE_ATS_M150_IDS(MACRO__, ...) \ + MACRO__(0x56C0, ## __VA_ARGS__), \ + MACRO__(0x56C2, ## __VA_ARGS__) + +#define XE_ATS_M75_IDS(MACRO__, ...) \ + MACRO__(0x56C1, ## __VA_ARGS__) + +#define XE_ATS_M_IDS(MACRO__, ...) \ + XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\ + XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__) + +/* MTL / ARL */ +#define XE_MTL_IDS(MACRO__, ...) \ + MACRO__(0x7D40, ## __VA_ARGS__), \ + MACRO__(0x7D45, ## __VA_ARGS__), \ + MACRO__(0x7D55, ## __VA_ARGS__), \ + MACRO__(0x7D60, ## __VA_ARGS__), \ + MACRO__(0x7D67, ## __VA_ARGS__), \ + MACRO__(0x7DD5, ## __VA_ARGS__) + +#define XE_LNL_IDS(MACRO__, ...) 
\ + MACRO__(0x6420, ## __VA_ARGS__), \ + MACRO__(0x64A0, ## __VA_ARGS__), \ + MACRO__(0x64B0, ## __VA_ARGS__) + +#endif diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h index be12e1dd1f38..51e0f6059410 100644 --- a/include/dt-bindings/arm/qcom,ids.h +++ b/include/dt-bindings/arm/qcom,ids.h @@ -193,6 +193,7 @@ #define QCOM_ID_SDA439 363 #define QCOM_ID_SDA429 364 #define QCOM_ID_SM7150 365 +#define QCOM_ID_SM7150P 366 #define QCOM_ID_IPQ8070 375 #define QCOM_ID_IPQ8071 376 #define QCOM_ID_QM215 386 @@ -203,6 +204,9 @@ #define QCOM_ID_SM6125 394 #define QCOM_ID_IPQ8070A 395 #define QCOM_ID_IPQ8071A 396 +#define QCOM_ID_IPQ8172 397 +#define QCOM_ID_IPQ8173 398 +#define QCOM_ID_IPQ8174 399 #define QCOM_ID_IPQ6018 402 #define QCOM_ID_IPQ6028 403 #define QCOM_ID_SDM429W 416 @@ -233,6 +237,7 @@ #define QCOM_ID_SM8450_3 482 #define QCOM_ID_SC7280 487 #define QCOM_ID_SC7180P 495 +#define QCOM_ID_QCM6490 497 #define QCOM_ID_IPQ5000 503 #define QCOM_ID_IPQ0509 504 #define QCOM_ID_IPQ0518 505 @@ -250,6 +255,7 @@ #define QCOM_ID_SA8775P 534 #define QCOM_ID_QRU1000 539 #define QCOM_ID_QDU1000 545 +#define QCOM_ID_SM8650 557 #define QCOM_ID_SM4450 568 #define QCOM_ID_QDU1010 587 #define QCOM_ID_QRU1032 588 diff --git a/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h new file mode 100644 index 000000000000..861a331963ac --- /dev/null +++ b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved. + * Author: Yu Tu <yu.tu@amlogic.com> + */ + +#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H +#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H + +#define CLKID_RTC_32K_CLKIN 0 +#define CLKID_RTC_32K_DIV 1 +#define CLKID_RTC_32K_SEL 2 +#define CLKID_RTC_32K_XATL 3 +#define CLKID_RTC 4 +#define CLKID_SYS_CLK_B_SEL 5 +#define CLKID_SYS_CLK_B_DIV 6 +#define CLKID_SYS_CLK_B 7 +#define CLKID_SYS_CLK_A_SEL 8 +#define CLKID_SYS_CLK_A_DIV 9 +#define CLKID_SYS_CLK_A 10 +#define CLKID_SYS 11 +#define CLKID_CECA_32K_CLKIN 12 +#define CLKID_CECA_32K_DIV 13 +#define CLKID_CECA_32K_SEL_PRE 14 +#define CLKID_CECA_32K_SEL 15 +#define CLKID_CECA_32K_CLKOUT 16 +#define CLKID_CECB_32K_CLKIN 17 +#define CLKID_CECB_32K_DIV 18 +#define CLKID_CECB_32K_SEL_PRE 19 +#define CLKID_CECB_32K_SEL 20 +#define CLKID_CECB_32K_CLKOUT 21 +#define CLKID_SC_CLK_SEL 22 +#define CLKID_SC_CLK_DIV 23 +#define CLKID_SC 24 +#define CLKID_12_24M 25 +#define CLKID_12M_CLK_DIV 26 +#define CLKID_12_24M_CLK_SEL 27 +#define CLKID_VID_PLL_DIV 28 +#define CLKID_VID_PLL_SEL 29 +#define CLKID_VID_PLL 30 +#define CLKID_VCLK_SEL 31 +#define CLKID_VCLK2_SEL 32 +#define CLKID_VCLK_INPUT 33 +#define CLKID_VCLK2_INPUT 34 +#define CLKID_VCLK_DIV 35 +#define CLKID_VCLK2_DIV 36 +#define CLKID_VCLK 37 +#define CLKID_VCLK2 38 +#define CLKID_VCLK_DIV1 39 +#define CLKID_VCLK_DIV2_EN 40 +#define CLKID_VCLK_DIV4_EN 41 +#define CLKID_VCLK_DIV6_EN 42 +#define CLKID_VCLK_DIV12_EN 43 +#define CLKID_VCLK2_DIV1 44 +#define CLKID_VCLK2_DIV2_EN 45 +#define CLKID_VCLK2_DIV4_EN 46 +#define CLKID_VCLK2_DIV6_EN 47 +#define CLKID_VCLK2_DIV12_EN 48 +#define CLKID_VCLK_DIV2 49 +#define CLKID_VCLK_DIV4 50 +#define CLKID_VCLK_DIV6 51 +#define CLKID_VCLK_DIV12 52 +#define CLKID_VCLK2_DIV2 53 +#define CLKID_VCLK2_DIV4 54 +#define CLKID_VCLK2_DIV6 55 +#define CLKID_VCLK2_DIV12 56 +#define CLKID_CTS_ENCI_SEL 57 +#define CLKID_CTS_ENCP_SEL 58 
+#define CLKID_CTS_VDAC_SEL 59 +#define CLKID_HDMI_TX_SEL 60 +#define CLKID_CTS_ENCI 61 +#define CLKID_CTS_ENCP 62 +#define CLKID_CTS_VDAC 63 +#define CLKID_HDMI_TX 64 +#define CLKID_HDMI_SEL 65 +#define CLKID_HDMI_DIV 66 +#define CLKID_HDMI 67 +#define CLKID_TS_CLK_DIV 68 +#define CLKID_TS 69 +#define CLKID_MALI_0_SEL 70 +#define CLKID_MALI_0_DIV 71 +#define CLKID_MALI_0 72 +#define CLKID_MALI_1_SEL 73 +#define CLKID_MALI_1_DIV 74 +#define CLKID_MALI_1 75 +#define CLKID_MALI_SEL 76 +#define CLKID_VDEC_P0_SEL 77 +#define CLKID_VDEC_P0_DIV 78 +#define CLKID_VDEC_P0 79 +#define CLKID_VDEC_P1_SEL 80 +#define CLKID_VDEC_P1_DIV 81 +#define CLKID_VDEC_P1 82 +#define CLKID_VDEC_SEL 83 +#define CLKID_HEVCF_P0_SEL 84 +#define CLKID_HEVCF_P0_DIV 85 +#define CLKID_HEVCF_P0 86 +#define CLKID_HEVCF_P1_SEL 87 +#define CLKID_HEVCF_P1_DIV 88 +#define CLKID_HEVCF_P1 89 +#define CLKID_HEVCF_SEL 90 +#define CLKID_VPU_0_SEL 91 +#define CLKID_VPU_0_DIV 92 +#define CLKID_VPU_0 93 +#define CLKID_VPU_1_SEL 94 +#define CLKID_VPU_1_DIV 95 +#define CLKID_VPU_1 96 +#define CLKID_VPU 97 +#define CLKID_VPU_CLKB_TMP_SEL 98 +#define CLKID_VPU_CLKB_TMP_DIV 99 +#define CLKID_VPU_CLKB_TMP 100 +#define CLKID_VPU_CLKB_DIV 101 +#define CLKID_VPU_CLKB 102 +#define CLKID_VPU_CLKC_P0_SEL 103 +#define CLKID_VPU_CLKC_P0_DIV 104 +#define CLKID_VPU_CLKC_P0 105 +#define CLKID_VPU_CLKC_P1_SEL 106 +#define CLKID_VPU_CLKC_P1_DIV 107 +#define CLKID_VPU_CLKC_P1 108 +#define CLKID_VPU_CLKC_SEL 109 +#define CLKID_VAPB_0_SEL 110 +#define CLKID_VAPB_0_DIV 111 +#define CLKID_VAPB_0 112 +#define CLKID_VAPB_1_SEL 113 +#define CLKID_VAPB_1_DIV 114 +#define CLKID_VAPB_1 115 +#define CLKID_VAPB 116 +#define CLKID_GE2D 117 +#define CLKID_VDIN_MEAS_SEL 118 +#define CLKID_VDIN_MEAS_DIV 119 +#define CLKID_VDIN_MEAS 120 +#define CLKID_SD_EMMC_C_CLK_SEL 121 +#define CLKID_SD_EMMC_C_CLK_DIV 122 +#define CLKID_SD_EMMC_C 123 +#define CLKID_SD_EMMC_A_CLK_SEL 124 +#define CLKID_SD_EMMC_A_CLK_DIV 125 +#define CLKID_SD_EMMC_A 126 +#define CLKID_SD_EMMC_B_CLK_SEL 127 +#define CLKID_SD_EMMC_B_CLK_DIV 128 +#define CLKID_SD_EMMC_B 129 +#define CLKID_SPICC0_SEL 130 +#define CLKID_SPICC0_DIV 131 +#define CLKID_SPICC0_EN 132 +#define CLKID_PWM_A_SEL 133 +#define CLKID_PWM_A_DIV 134 +#define CLKID_PWM_A 135 +#define CLKID_PWM_B_SEL 136 +#define CLKID_PWM_B_DIV 137 +#define CLKID_PWM_B 138 +#define CLKID_PWM_C_SEL 139 +#define CLKID_PWM_C_DIV 140 +#define CLKID_PWM_C 141 +#define CLKID_PWM_D_SEL 142 +#define CLKID_PWM_D_DIV 143 +#define CLKID_PWM_D 144 +#define CLKID_PWM_E_SEL 145 +#define CLKID_PWM_E_DIV 146 +#define CLKID_PWM_E 147 +#define CLKID_PWM_F_SEL 148 +#define CLKID_PWM_F_DIV 149 +#define CLKID_PWM_F 150 +#define CLKID_PWM_G_SEL 151 +#define CLKID_PWM_G_DIV 152 +#define CLKID_PWM_G 153 +#define CLKID_PWM_H_SEL 154 +#define CLKID_PWM_H_DIV 155 +#define CLKID_PWM_H 156 +#define CLKID_PWM_I_SEL 157 +#define CLKID_PWM_I_DIV 158 +#define CLKID_PWM_I 159 +#define CLKID_PWM_J_SEL 160 +#define CLKID_PWM_J_DIV 161 +#define CLKID_PWM_J 162 +#define CLKID_SARADC_SEL 163 +#define CLKID_SARADC_DIV 164 +#define CLKID_SARADC 165 +#define CLKID_GEN_SEL 166 +#define CLKID_GEN_DIV 167 +#define CLKID_GEN 168 +#define CLKID_DDR 169 +#define CLKID_DOS 170 +#define CLKID_ETHPHY 171 +#define CLKID_MALI 172 +#define CLKID_AOCPU 173 +#define CLKID_AUCPU 174 +#define CLKID_CEC 175 +#define CLKID_SDEMMC_A 176 +#define CLKID_SDEMMC_B 177 +#define CLKID_NAND 178 +#define CLKID_SMARTCARD 179 +#define CLKID_ACODEC 180 +#define CLKID_SPIFC 181 +#define CLKID_MSR 182 +#define CLKID_IR_CTRL 
183 +#define CLKID_AUDIO 184 +#define CLKID_ETH 185 +#define CLKID_UART_A 186 +#define CLKID_UART_B 187 +#define CLKID_UART_C 188 +#define CLKID_UART_D 189 +#define CLKID_UART_E 190 +#define CLKID_AIFIFO 191 +#define CLKID_TS_DDR 192 +#define CLKID_TS_PLL 193 +#define CLKID_G2D 194 +#define CLKID_SPICC0 195 +#define CLKID_SPICC1 196 +#define CLKID_USB 197 +#define CLKID_I2C_M_A 198 +#define CLKID_I2C_M_B 199 +#define CLKID_I2C_M_C 200 +#define CLKID_I2C_M_D 201 +#define CLKID_I2C_M_E 202 +#define CLKID_HDMITX_APB 203 +#define CLKID_I2C_S_A 204 +#define CLKID_USB1_TO_DDR 205 +#define CLKID_HDCP22 206 +#define CLKID_MMC_APB 207 +#define CLKID_RSA 208 +#define CLKID_CPU_DEBUG 209 +#define CLKID_VPU_INTR 210 +#define CLKID_DEMOD 211 +#define CLKID_SAR_ADC 212 +#define CLKID_GIC 213 +#define CLKID_PWM_AB 214 +#define CLKID_PWM_CD 215 +#define CLKID_PWM_EF 216 +#define CLKID_PWM_GH 217 +#define CLKID_PWM_IJ 218 +#define CLKID_HDCP22_ESMCLK_SEL 219 +#define CLKID_HDCP22_ESMCLK_DIV 220 +#define CLKID_HDCP22_ESMCLK 221 +#define CLKID_HDCP22_SKPCLK_SEL 222 +#define CLKID_HDCP22_SKPCLK_DIV 223 +#define CLKID_HDCP22_SKPCLK 224 + +#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H */ diff --git a/include/dt-bindings/clock/amlogic,s4-pll-clkc.h b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h new file mode 100644 index 000000000000..af9f110f8b62 --- /dev/null +++ b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved. + * Author: Yu Tu <yu.tu@amlogic.com> + */ + +#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H +#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H + +#define CLKID_FIXED_PLL_DCO 0 +#define CLKID_FIXED_PLL 1 +#define CLKID_FCLK_DIV2_DIV 2 +#define CLKID_FCLK_DIV2 3 +#define CLKID_FCLK_DIV3_DIV 4 +#define CLKID_FCLK_DIV3 5 +#define CLKID_FCLK_DIV4_DIV 6 +#define CLKID_FCLK_DIV4 7 +#define CLKID_FCLK_DIV5_DIV 8 +#define CLKID_FCLK_DIV5 9 +#define CLKID_FCLK_DIV7_DIV 10 +#define CLKID_FCLK_DIV7 11 +#define CLKID_FCLK_DIV2P5_DIV 12 +#define CLKID_FCLK_DIV2P5 13 +#define CLKID_GP0_PLL_DCO 14 +#define CLKID_GP0_PLL 15 +#define CLKID_HIFI_PLL_DCO 16 +#define CLKID_HIFI_PLL 17 +#define CLKID_HDMI_PLL_DCO 18 +#define CLKID_HDMI_PLL_OD 19 +#define CLKID_HDMI_PLL 20 +#define CLKID_MPLL_50M_DIV 21 +#define CLKID_MPLL_50M 22 +#define CLKID_MPLL_PREDIV 23 +#define CLKID_MPLL0_DIV 24 +#define CLKID_MPLL0 25 +#define CLKID_MPLL1_DIV 26 +#define CLKID_MPLL1 27 +#define CLKID_MPLL2_DIV 28 +#define CLKID_MPLL2 29 +#define CLKID_MPLL3_DIV 30 +#define CLKID_MPLL3 31 + +#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H */ diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h index 387767f4e298..fd09819da2ec 100644 --- a/include/dt-bindings/clock/g12a-clkc.h +++ b/include/dt-bindings/clock/g12a-clkc.h @@ -279,5 +279,13 @@ #define CLKID_MIPI_DSI_PXCLK_DIV 268 #define CLKID_MIPI_DSI_PXCLK_SEL 269 #define CLKID_MIPI_DSI_PXCLK 270 +#define CLKID_CTS_ENCL 271 +#define CLKID_CTS_ENCL_SEL 272 +#define CLKID_MIPI_ISP_DIV 273 +#define CLKID_MIPI_ISP_SEL 274 +#define CLKID_MIPI_ISP 275 +#define CLKID_MIPI_ISP_GATE 276 +#define CLKID_MIPI_ISP_CSI_PHY0 277 +#define CLKID_MIPI_ISP_CSI_PHY1 278 #endif /* __G12A_CLKC_H */ diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h new file mode 100644 index 000000000000..21adec22387c --- /dev/null +++ b/include/dt-bindings/clock/google,gs101.h @@ -0,0 +1,392 @@ +/* 
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Device Tree binding constants for Google gs101 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+#define _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+
+/* CMU_TOP PLL */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SPARE_PLL 5
+
+/* CMU_TOP MUX */
+#define CLK_MOUT_PLL_SHARED0 6
+#define CLK_MOUT_PLL_SHARED1 7
+#define CLK_MOUT_PLL_SHARED2 8
+#define CLK_MOUT_PLL_SHARED3 9
+#define CLK_MOUT_PLL_SPARE 10
+#define CLK_MOUT_CMU_BO_BUS 11
+#define CLK_MOUT_CMU_BUS0_BUS 12
+#define CLK_MOUT_CMU_BUS1_BUS 13
+#define CLK_MOUT_CMU_BUS2_BUS 14
+#define CLK_MOUT_CMU_CIS_CLK0 15
+#define CLK_MOUT_CMU_CIS_CLK1 16
+#define CLK_MOUT_CMU_CIS_CLK2 17
+#define CLK_MOUT_CMU_CIS_CLK3 18
+#define CLK_MOUT_CMU_CIS_CLK4 19
+#define CLK_MOUT_CMU_CIS_CLK5 20
+#define CLK_MOUT_CMU_CIS_CLK6 21
+#define CLK_MOUT_CMU_CIS_CLK7 22
+#define CLK_MOUT_CMU_CMU_BOOST 23
+#define CLK_MOUT_CMU_BOOST_OPTION1 24
+#define CLK_MOUT_CMU_CORE_BUS 25
+#define CLK_MOUT_CMU_CPUCL0_DBG 26
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 27
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 28
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 29
+#define CLK_MOUT_CMU_CSIS_BUS 30
+#define CLK_MOUT_CMU_DISP_BUS 31
+#define CLK_MOUT_CMU_DNS_BUS 32
+#define CLK_MOUT_CMU_DPU_BUS 33
+#define CLK_MOUT_CMU_EH_BUS 34
+#define CLK_MOUT_CMU_G2D_G2D 35
+#define CLK_MOUT_CMU_G2D_MSCL 36
+#define CLK_MOUT_CMU_G3AA_G3AA 37
+#define CLK_MOUT_CMU_G3D_BUSD 38
+#define CLK_MOUT_CMU_G3D_GLB 39
+#define CLK_MOUT_CMU_G3D_SWITCH 40
+#define CLK_MOUT_CMU_GDC_GDC0 41
+#define CLK_MOUT_CMU_GDC_GDC1 42
+#define CLK_MOUT_CMU_GDC_SCSC 43
+#define CLK_MOUT_CMU_HPM 44
+#define CLK_MOUT_CMU_HSI0_BUS 45
+#define CLK_MOUT_CMU_HSI0_DPGTC 46
+#define CLK_MOUT_CMU_HSI0_USB31DRD 47
+#define CLK_MOUT_CMU_HSI0_USBDPDBG 48
+#define CLK_MOUT_CMU_HSI1_BUS 49
+#define CLK_MOUT_CMU_HSI1_PCIE 50
+#define CLK_MOUT_CMU_HSI2_BUS 51
+#define CLK_MOUT_CMU_HSI2_MMC_CARD 52
+#define CLK_MOUT_CMU_HSI2_PCIE 53
+#define CLK_MOUT_CMU_HSI2_UFS_EMBD 54
+#define CLK_MOUT_CMU_IPP_BUS 55
+#define CLK_MOUT_CMU_ITP_BUS 56
+#define CLK_MOUT_CMU_MCSC_ITSC 57
+#define CLK_MOUT_CMU_MCSC_MCSC 58
+#define CLK_MOUT_CMU_MFC_MFC 59
+#define CLK_MOUT_CMU_MIF_BUSP 60
+#define CLK_MOUT_CMU_MIF_SWITCH 61
+#define CLK_MOUT_CMU_MISC_BUS 62
+#define CLK_MOUT_CMU_MISC_SSS 63
+#define CLK_MOUT_CMU_PDP_BUS 64
+#define CLK_MOUT_CMU_PDP_VRA 65
+#define CLK_MOUT_CMU_PERIC0_BUS 66
+#define CLK_MOUT_CMU_PERIC0_IP 67
+#define CLK_MOUT_CMU_PERIC1_BUS 68
+#define CLK_MOUT_CMU_PERIC1_IP 69
+#define CLK_MOUT_CMU_TNR_BUS 70
+#define CLK_MOUT_CMU_TOP_BOOST_OPTION1 71
+#define CLK_MOUT_CMU_TOP_CMUREF 72
+#define CLK_MOUT_CMU_TPU_BUS 73
+#define CLK_MOUT_CMU_TPU_TPU 74
+#define CLK_MOUT_CMU_TPU_TPUCTL 75
+#define CLK_MOUT_CMU_TPU_UART 76
+#define CLK_MOUT_CMU_CMUREF 77
+
+/* CMU_TOP Dividers */
+#define CLK_DOUT_CMU_BO_BUS 78
+#define CLK_DOUT_CMU_BUS0_BUS 79
+#define CLK_DOUT_CMU_BUS1_BUS 80
+#define CLK_DOUT_CMU_BUS2_BUS 81
+#define CLK_DOUT_CMU_CIS_CLK0 82
+#define CLK_DOUT_CMU_CIS_CLK1 83
+#define CLK_DOUT_CMU_CIS_CLK2 84
+#define CLK_DOUT_CMU_CIS_CLK3 85
+#define CLK_DOUT_CMU_CIS_CLK4 86
+#define CLK_DOUT_CMU_CIS_CLK5 87
+#define CLK_DOUT_CMU_CIS_CLK6 88
+#define CLK_DOUT_CMU_CIS_CLK7 89
+#define CLK_DOUT_CMU_CORE_BUS 90
+#define CLK_DOUT_CMU_CPUCL0_DBG 91
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 92
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 93
+#define CLK_DOUT_CMU_CPUCL2_SWITCH 94
+#define CLK_DOUT_CMU_CSIS_BUS 95
+#define CLK_DOUT_CMU_DISP_BUS 96
+#define CLK_DOUT_CMU_DNS_BUS 97
+#define CLK_DOUT_CMU_DPU_BUS 98
+#define CLK_DOUT_CMU_EH_BUS 99
+#define CLK_DOUT_CMU_G2D_G2D 100
+#define CLK_DOUT_CMU_G2D_MSCL 101
+#define CLK_DOUT_CMU_G3AA_G3AA 102
+#define CLK_DOUT_CMU_G3D_BUSD 103
+#define CLK_DOUT_CMU_G3D_GLB 104
+#define CLK_DOUT_CMU_G3D_SWITCH 105
+#define CLK_DOUT_CMU_GDC_GDC0 106
+#define CLK_DOUT_CMU_GDC_GDC1 107
+#define CLK_DOUT_CMU_GDC_SCSC 108
+#define CLK_DOUT_CMU_CMU_HPM 109
+#define CLK_DOUT_CMU_HSI0_BUS 110
+#define CLK_DOUT_CMU_HSI0_DPGTC 111
+#define CLK_DOUT_CMU_HSI0_USB31DRD 112
+#define CLK_DOUT_CMU_HSI0_USBDPDBG 113
+#define CLK_DOUT_CMU_HSI1_BUS 114
+#define CLK_DOUT_CMU_HSI1_PCIE 115
+#define CLK_DOUT_CMU_HSI2_BUS 116
+#define CLK_DOUT_CMU_HSI2_MMC_CARD 117
+#define CLK_DOUT_CMU_HSI2_PCIE 118
+#define CLK_DOUT_CMU_HSI2_UFS_EMBD 119
+#define CLK_DOUT_CMU_IPP_BUS 120
+#define CLK_DOUT_CMU_ITP_BUS 121
+#define CLK_DOUT_CMU_MCSC_ITSC 122
+#define CLK_DOUT_CMU_MCSC_MCSC 123
+#define CLK_DOUT_CMU_MFC_MFC 124
+#define CLK_DOUT_CMU_MIF_BUSP 125
+#define CLK_DOUT_CMU_MISC_BUS 126
+#define CLK_DOUT_CMU_MISC_SSS 127
+#define CLK_DOUT_CMU_OTP 128
+#define CLK_DOUT_CMU_PDP_BUS 129
+#define CLK_DOUT_CMU_PDP_VRA 130
+#define CLK_DOUT_CMU_PERIC0_BUS 131
+#define CLK_DOUT_CMU_PERIC0_IP 132
+#define CLK_DOUT_CMU_PERIC1_BUS 133
+#define CLK_DOUT_CMU_PERIC1_IP 134
+#define CLK_DOUT_CMU_TNR_BUS 135
+#define CLK_DOUT_CMU_TPU_BUS 136
+#define CLK_DOUT_CMU_TPU_TPU 137
+#define CLK_DOUT_CMU_TPU_TPUCTL 138
+#define CLK_DOUT_CMU_TPU_UART 139
+#define CLK_DOUT_CMU_CMU_BOOST 140
+#define CLK_DOUT_CMU_CMU_CMUREF 141
+#define CLK_DOUT_CMU_SHARED0_DIV2 142
+#define CLK_DOUT_CMU_SHARED0_DIV3 143
+#define CLK_DOUT_CMU_SHARED0_DIV4 144
+#define CLK_DOUT_CMU_SHARED0_DIV5 145
+#define CLK_DOUT_CMU_SHARED1_DIV2 146
+#define CLK_DOUT_CMU_SHARED1_DIV3 147
+#define CLK_DOUT_CMU_SHARED1_DIV4 148
+#define CLK_DOUT_CMU_SHARED2_DIV2 149
+#define CLK_DOUT_CMU_SHARED3_DIV2 150
+
+/* CMU_TOP Gates */
+#define CLK_GOUT_CMU_BUS0_BOOST 151
+#define CLK_GOUT_CMU_BUS1_BOOST 152
+#define CLK_GOUT_CMU_BUS2_BOOST 153
+#define CLK_GOUT_CMU_CORE_BOOST 154
+#define CLK_GOUT_CMU_CPUCL0_BOOST 155
+#define CLK_GOUT_CMU_CPUCL1_BOOST 156
+#define CLK_GOUT_CMU_CPUCL2_BOOST 157
+#define CLK_GOUT_CMU_MIF_BOOST 158
+#define CLK_GOUT_CMU_MIF_SWITCH 159
+#define CLK_GOUT_CMU_BO_BUS 160
+#define CLK_GOUT_CMU_BUS0_BUS 161
+#define CLK_GOUT_CMU_BUS1_BUS 162
+#define CLK_GOUT_CMU_BUS2_BUS 163
+#define CLK_GOUT_CMU_CIS_CLK0 164
+#define CLK_GOUT_CMU_CIS_CLK1 165
+#define CLK_GOUT_CMU_CIS_CLK2 166
+#define CLK_GOUT_CMU_CIS_CLK3 167
+#define CLK_GOUT_CMU_CIS_CLK4 168
+#define CLK_GOUT_CMU_CIS_CLK5 169
+#define CLK_GOUT_CMU_CIS_CLK6 170
+#define CLK_GOUT_CMU_CIS_CLK7 171
+#define CLK_GOUT_CMU_CMU_BOOST 172
+#define CLK_GOUT_CMU_CORE_BUS 173
+#define CLK_GOUT_CMU_CPUCL0_DBG 174
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 175
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 176
+#define CLK_GOUT_CMU_CPUCL2_SWITCH 177
+#define CLK_GOUT_CMU_CSIS_BUS 178
+#define CLK_GOUT_CMU_DISP_BUS 179
+#define CLK_GOUT_CMU_DNS_BUS 180
+#define CLK_GOUT_CMU_DPU_BUS 181
+#define CLK_GOUT_CMU_EH_BUS 182
+#define CLK_GOUT_CMU_G2D_G2D 183
+#define CLK_GOUT_CMU_G2D_MSCL 184
+#define CLK_GOUT_CMU_G3AA_G3AA 185
+#define CLK_GOUT_CMU_G3D_BUSD 186
+#define CLK_GOUT_CMU_G3D_GLB 187
+#define CLK_GOUT_CMU_G3D_SWITCH 188
+#define CLK_GOUT_CMU_GDC_GDC0 189
+#define CLK_GOUT_CMU_GDC_GDC1 190
+#define CLK_GOUT_CMU_GDC_SCSC 191
+#define CLK_GOUT_CMU_HPM 192
+#define CLK_GOUT_CMU_HSI0_BUS 193
+#define CLK_GOUT_CMU_HSI0_DPGTC 194
+#define CLK_GOUT_CMU_HSI0_USB31DRD 195
+#define CLK_GOUT_CMU_HSI0_USBDPDBG 196
+#define CLK_GOUT_CMU_HSI1_BUS 197
+#define CLK_GOUT_CMU_HSI1_PCIE 198
+#define CLK_GOUT_CMU_HSI2_BUS 199
+#define CLK_GOUT_CMU_HSI2_MMC_CARD 200
+#define CLK_GOUT_CMU_HSI2_PCIE 201
+#define CLK_GOUT_CMU_HSI2_UFS_EMBD 202
+#define CLK_GOUT_CMU_IPP_BUS 203
+#define CLK_GOUT_CMU_ITP_BUS 204
+#define CLK_GOUT_CMU_MCSC_ITSC 205
+#define CLK_GOUT_CMU_MCSC_MCSC 206
+#define CLK_GOUT_CMU_MFC_MFC 207
+#define CLK_GOUT_CMU_MIF_BUSP 208
+#define CLK_GOUT_CMU_MISC_BUS 209
+#define CLK_GOUT_CMU_MISC_SSS 210
+#define CLK_GOUT_CMU_PDP_BUS 211
+#define CLK_GOUT_CMU_PDP_VRA 212
+#define CLK_GOUT_CMU_G3AA 213
+#define CLK_GOUT_CMU_PERIC0_BUS 214
+#define CLK_GOUT_CMU_PERIC0_IP 215
+#define CLK_GOUT_CMU_PERIC1_BUS 216
+#define CLK_GOUT_CMU_PERIC1_IP 217
+#define CLK_GOUT_CMU_TNR_BUS 218
+#define CLK_GOUT_CMU_TOP_CMUREF 219
+#define CLK_GOUT_CMU_TPU_BUS 220
+#define CLK_GOUT_CMU_TPU_TPU 221
+#define CLK_GOUT_CMU_TPU_TPUCTL 222
+#define CLK_GOUT_CMU_TPU_UART 223
+
+/* CMU_APM */
+#define CLK_MOUT_APM_FUNC 1
+#define CLK_MOUT_APM_FUNCSRC 2
+#define CLK_DOUT_APM_BOOST 3
+#define CLK_DOUT_APM_USI0_UART 4
+#define CLK_DOUT_APM_USI0_USI 5
+#define CLK_DOUT_APM_USI1_UART 6
+#define CLK_GOUT_APM_APM_CMU_APM_PCLK 7
+#define CLK_GOUT_BUS0_BOOST_OPTION1 8
+#define CLK_GOUT_CMU_BOOST_OPTION1 9
+#define CLK_GOUT_CORE_BOOST_OPTION1 10
+#define CLK_GOUT_APM_FUNC 11
+#define CLK_GOUT_APM_APBIF_GPIO_ALIVE_PCLK 12
+#define CLK_GOUT_APM_APBIF_GPIO_FAR_ALIVE_PCLK 13
+#define CLK_GOUT_APM_APBIF_PMU_ALIVE_PCLK 14
+#define CLK_GOUT_APM_APBIF_RTC_PCLK 15
+#define CLK_GOUT_APM_APBIF_TRTC_PCLK 16
+#define CLK_GOUT_APM_APM_USI0_UART_IPCLK 17
+#define CLK_GOUT_APM_APM_USI0_UART_PCLK 18
+#define CLK_GOUT_APM_APM_USI0_USI_IPCLK 19
+#define CLK_GOUT_APM_APM_USI0_USI_PCLK 20
+#define CLK_GOUT_APM_APM_USI1_UART_IPCLK 21
+#define CLK_GOUT_APM_APM_USI1_UART_PCLK 22
+#define CLK_GOUT_APM_D_TZPC_APM_PCLK 23
+#define CLK_GOUT_APM_GPC_APM_PCLK 24
+#define CLK_GOUT_APM_GREBEINTEGRATION_HCLK 25
+#define CLK_GOUT_APM_INTMEM_ACLK 26
+#define CLK_GOUT_APM_INTMEM_PCLK 27
+#define CLK_GOUT_APM_LHM_AXI_G_SWD_I_CLK 28
+#define CLK_GOUT_APM_LHM_AXI_P_AOCAPM_I_CLK 29
+#define CLK_GOUT_APM_LHM_AXI_P_APM_I_CLK 30
+#define CLK_GOUT_APM_LHS_AXI_D_APM_I_CLK 31
+#define CLK_GOUT_APM_LHS_AXI_G_DBGCORE_I_CLK 32
+#define CLK_GOUT_APM_LHS_AXI_G_SCAN2DRAM_I_CLK 33
+#define CLK_GOUT_APM_MAILBOX_APM_AOC_PCLK 34
+#define CLK_GOUT_APM_MAILBOX_APM_AP_PCLK 35
+#define CLK_GOUT_APM_MAILBOX_APM_GSA_PCLK 36
+#define CLK_GOUT_APM_MAILBOX_APM_SWD_PCLK 37
+#define CLK_GOUT_APM_MAILBOX_APM_TPU_PCLK 38
+#define CLK_GOUT_APM_MAILBOX_AP_AOC_PCLK 39
+#define CLK_GOUT_APM_MAILBOX_AP_DBGCORE_PCLK 40
+#define CLK_GOUT_APM_PMU_INTR_GEN_PCLK 41
+#define CLK_GOUT_APM_ROM_CRC32_HOST_ACLK 42
+#define CLK_GOUT_APM_ROM_CRC32_HOST_PCLK 43
+#define CLK_GOUT_APM_CLK_APM_BUS_CLK 44
+#define CLK_GOUT_APM_CLK_APM_USI0_UART_CLK 45
+#define CLK_GOUT_APM_CLK_APM_USI0_USI_CLK 46
+#define CLK_GOUT_APM_CLK_APM_USI1_UART_CLK 47
+#define CLK_GOUT_APM_SPEEDY_APM_PCLK 48
+#define CLK_GOUT_APM_SPEEDY_SUB_APM_PCLK 49
+#define CLK_GOUT_APM_SSMT_D_APM_ACLK 50
+#define CLK_GOUT_APM_SSMT_D_APM_PCLK 51
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_ACLK 52
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_PCLK 53
+#define CLK_GOUT_APM_SS_DBGCORE_SS_DBGCORE_HCLK 54
+#define CLK_GOUT_APM_SYSMMU_D_APM_CLK_S2 55
+#define CLK_GOUT_APM_SYSREG_APM_PCLK 56
+#define CLK_GOUT_APM_UASC_APM_ACLK 57
+#define CLK_GOUT_APM_UASC_APM_PCLK 58
+#define CLK_GOUT_APM_UASC_DBGCORE_ACLK 59
+#define CLK_GOUT_APM_UASC_DBGCORE_PCLK 60
+#define CLK_GOUT_APM_UASC_G_SWD_ACLK 61
+#define CLK_GOUT_APM_UASC_G_SWD_PCLK 62
+#define CLK_GOUT_APM_UASC_P_AOCAPM_ACLK 63
+#define CLK_GOUT_APM_UASC_P_AOCAPM_PCLK 64
+#define CLK_GOUT_APM_UASC_P_APM_ACLK 65
+#define CLK_GOUT_APM_UASC_P_APM_PCLK 66
+#define CLK_GOUT_APM_WDT_APM_PCLK 67
+#define CLK_GOUT_APM_XIU_DP_APM_ACLK 68
+#define CLK_APM_PLL_DIV2_APM 69
+#define CLK_APM_PLL_DIV4_APM 70
+#define CLK_APM_PLL_DIV16_APM 71
+
+/* CMU_MISC */
+#define CLK_MOUT_MISC_BUS_USER 1
+#define CLK_MOUT_MISC_SSS_USER 2
+#define CLK_MOUT_MISC_GIC 3
+#define CLK_DOUT_MISC_BUSP 4
+#define CLK_DOUT_MISC_GIC 5
+#define CLK_GOUT_MISC_MISC_CMU_MISC_PCLK 6
+#define CLK_GOUT_MISC_OTP_CON_BIRA_I_OSCCLK 7
+#define CLK_GOUT_MISC_OTP_CON_BISR_I_OSCCLK 8
+#define CLK_GOUT_MISC_OTP_CON_TOP_I_OSCCLK 9
+#define CLK_GOUT_MISC_CLK_MISC_OSCCLK_CLK 10
+#define CLK_GOUT_MISC_ADM_AHB_SSS_HCLKM 11
+#define CLK_GOUT_MISC_AD_APB_DIT_PCLKM 12
+#define CLK_GOUT_MISC_AD_APB_PUF_PCLKM 13
+#define CLK_GOUT_MISC_DIT_ICLKL2A 14
+#define CLK_GOUT_MISC_D_TZPC_MISC_PCLK 15
+#define CLK_GOUT_MISC_GIC_GICCLK 16
+#define CLK_GOUT_MISC_GPC_MISC_PCLK 17
+#define CLK_GOUT_MISC_LHM_AST_ICC_CPUGIC_I_CLK 18
+#define CLK_GOUT_MISC_LHM_AXI_D_SSS_I_CLK 19
+#define CLK_GOUT_MISC_LHM_AXI_P_GIC_I_CLK 20
+#define CLK_GOUT_MISC_LHM_AXI_P_MISC_I_CLK 21
+#define CLK_GOUT_MISC_LHS_ACEL_D_MISC_I_CLK 22
+#define CLK_GOUT_MISC_LHS_AST_IRI_GICCPU_I_CLK 23
+#define CLK_GOUT_MISC_LHS_AXI_D_SSS_I_CLK 24
+#define CLK_GOUT_MISC_MCT_PCLK 25
+#define CLK_GOUT_MISC_OTP_CON_BIRA_PCLK 26
+#define CLK_GOUT_MISC_OTP_CON_BISR_PCLK 27
+#define CLK_GOUT_MISC_OTP_CON_TOP_PCLK 28
+#define CLK_GOUT_MISC_PDMA_ACLK 29
+#define CLK_GOUT_MISC_PPMU_DMA_ACLK 30
+#define CLK_GOUT_MISC_PPMU_MISC_ACLK 31
+#define CLK_GOUT_MISC_PPMU_MISC_PCLK 32
+#define CLK_GOUT_MISC_PUF_I_CLK 33
+#define CLK_GOUT_MISC_QE_DIT_ACLK 34
+#define CLK_GOUT_MISC_QE_DIT_PCLK 35
+#define CLK_GOUT_MISC_QE_PDMA_ACLK 36
+#define CLK_GOUT_MISC_QE_PDMA_PCLK 37
+#define CLK_GOUT_MISC_QE_PPMU_DMA_ACLK 38
+#define CLK_GOUT_MISC_QE_PPMU_DMA_PCLK 39
+#define CLK_GOUT_MISC_QE_RTIC_ACLK 40
+#define CLK_GOUT_MISC_QE_RTIC_PCLK 41
+#define CLK_GOUT_MISC_QE_SPDMA_ACLK 42
+#define CLK_GOUT_MISC_QE_SPDMA_PCLK 43
+#define CLK_GOUT_MISC_QE_SSS_ACLK 44
+#define CLK_GOUT_MISC_QE_SSS_PCLK 45
+#define CLK_GOUT_MISC_CLK_MISC_BUSD_CLK 46
+#define CLK_GOUT_MISC_CLK_MISC_BUSP_CLK 47
+#define CLK_GOUT_MISC_CLK_MISC_GIC_CLK 48
+#define CLK_GOUT_MISC_CLK_MISC_SSS_CLK 49
+#define CLK_GOUT_MISC_RTIC_I_ACLK 50
+#define CLK_GOUT_MISC_RTIC_I_PCLK 51
+#define CLK_GOUT_MISC_SPDMA_ACLK 52
+#define CLK_GOUT_MISC_SSMT_DIT_ACLK 53
+#define CLK_GOUT_MISC_SSMT_DIT_PCLK 54
+#define CLK_GOUT_MISC_SSMT_PDMA_ACLK 55
+#define CLK_GOUT_MISC_SSMT_PDMA_PCLK 56
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_ACLK 57
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_PCLK 58
+#define CLK_GOUT_MISC_SSMT_RTIC_ACLK 59
+#define CLK_GOUT_MISC_SSMT_RTIC_PCLK 60
+#define CLK_GOUT_MISC_SSMT_SPDMA_ACLK 61
+#define CLK_GOUT_MISC_SSMT_SPDMA_PCLK 62
+#define CLK_GOUT_MISC_SSMT_SSS_ACLK 63
+#define CLK_GOUT_MISC_SSMT_SSS_PCLK 64
+#define CLK_GOUT_MISC_SSS_I_ACLK 65
+#define CLK_GOUT_MISC_SSS_I_PCLK 66
+#define CLK_GOUT_MISC_SYSMMU_MISC_CLK_S2 67
+#define CLK_GOUT_MISC_SYSMMU_SSS_CLK_S1 68
+#define CLK_GOUT_MISC_SYSREG_MISC_PCLK 69
+#define CLK_GOUT_MISC_TMU_SUB_PCLK 70
+#define CLK_GOUT_MISC_TMU_TOP_PCLK 71
+#define CLK_GOUT_MISC_WDT_CLUSTER0_PCLK 72
+#define CLK_GOUT_MISC_WDT_CLUSTER1_PCLK 73
+#define CLK_GOUT_MISC_XIU_D_MISC_ACLK 74
+
+#endif /* _DT_BINDINGS_CLOCK_GOOGLE_GS101_H */
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 11cb0a4fe999..7da4243984b2 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -376,7 +376,6 @@
 #define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36
 #define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37
 #define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38
-#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39
 #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40
 #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41
 #define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42
diff --git a/include/dt-bindings/clock/mediatek,mt7988-clk.h b/include/dt-bindings/clock/mediatek,mt7988-clk.h
new file mode 100644
index 000000000000..63376e40f14d
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt7988-clk.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7988_H
+#define _DT_BINDINGS_CLK_MT7988_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NETSYSPLL 0
+#define CLK_APMIXED_MPLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_APLL2 3
+#define CLK_APMIXED_NET1PLL 4
+#define CLK_APMIXED_NET2PLL 5
+#define CLK_APMIXED_WEDMCUPLL 6
+#define CLK_APMIXED_SGMPLL 7
+#define CLK_APMIXED_ARM_B 8
+#define CLK_APMIXED_CCIPLL2_B 9
+#define CLK_APMIXED_USXGMIIPLL 10
+#define CLK_APMIXED_MSDCPLL 11
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D3_D2 5
+#define CLK_TOP_MPLL_D4 6
+#define CLK_TOP_MPLL_D8 7
+#define CLK_TOP_MPLL_D8_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D3_D5 10
+#define CLK_TOP_MMPLL_D4 11
+#define CLK_TOP_MMPLL_D6_D2 12
+#define CLK_TOP_MMPLL_D8 13
+#define CLK_TOP_APLL2_D4 14
+#define CLK_TOP_NET1PLL_D4 15
+#define CLK_TOP_NET1PLL_D5 16
+#define CLK_TOP_NET1PLL_D5_D2 17
+#define CLK_TOP_NET1PLL_D5_D4 18
+#define CLK_TOP_NET1PLL_D8 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET1PLL_D8_D8 22
+#define CLK_TOP_NET1PLL_D8_D16 23
+#define CLK_TOP_NET2PLL_D2 24
+#define CLK_TOP_NET2PLL_D4 25
+#define CLK_TOP_NET2PLL_D4_D4 26
+#define CLK_TOP_NET2PLL_D4_D8 27
+#define CLK_TOP_NET2PLL_D6 28
+#define CLK_TOP_NET2PLL_D8 29
+#define CLK_TOP_NETSYS_SEL 30
+#define CLK_TOP_NETSYS_500M_SEL 31
+#define CLK_TOP_NETSYS_2X_SEL 32
+#define CLK_TOP_NETSYS_GSW_SEL 33
+#define CLK_TOP_ETH_GMII_SEL 34
+#define CLK_TOP_NETSYS_MCU_SEL 35
+#define CLK_TOP_NETSYS_PAO_2X_SEL 36
+#define CLK_TOP_EIP197_SEL 37
+#define CLK_TOP_AXI_INFRA_SEL 38
+#define CLK_TOP_UART_SEL 39
+#define CLK_TOP_EMMC_250M_SEL 40
+#define CLK_TOP_EMMC_400M_SEL 41
+#define CLK_TOP_SPI_SEL 42
+#define CLK_TOP_SPIM_MST_SEL 43
+#define CLK_TOP_NFI1X_SEL 44
+#define CLK_TOP_SPINFI_SEL 45
+#define CLK_TOP_PWM_SEL 46
+#define CLK_TOP_I2C_SEL 47
+#define CLK_TOP_PCIE_MBIST_250M_SEL 48
+#define CLK_TOP_PEXTP_TL_SEL 49
+#define CLK_TOP_PEXTP_TL_P1_SEL 50
+#define CLK_TOP_PEXTP_TL_P2_SEL 51
+#define CLK_TOP_PEXTP_TL_P3_SEL 52
+#define CLK_TOP_USB_SYS_SEL 53
+#define CLK_TOP_USB_SYS_P1_SEL 54
+#define CLK_TOP_USB_XHCI_SEL 55
+#define CLK_TOP_USB_XHCI_P1_SEL 56
+#define CLK_TOP_USB_FRMCNT_SEL 57
+#define CLK_TOP_USB_FRMCNT_P1_SEL 58
+#define CLK_TOP_AUD_SEL 59
+#define CLK_TOP_A1SYS_SEL 60
+#define CLK_TOP_AUD_L_SEL 61
+#define CLK_TOP_A_TUNER_SEL 62
+#define CLK_TOP_SSPXTP_SEL 63
+#define CLK_TOP_USB_PHY_SEL 64
+#define CLK_TOP_USXGMII_SBUS_0_SEL 65
+#define CLK_TOP_USXGMII_SBUS_1_SEL 66
+#define CLK_TOP_SGM_0_SEL 67
+#define CLK_TOP_SGM_SBUS_0_SEL 68
+#define CLK_TOP_SGM_1_SEL 69
+#define CLK_TOP_SGM_SBUS_1_SEL 70
+#define CLK_TOP_XFI_PHY_0_XTAL_SEL 71
+#define CLK_TOP_XFI_PHY_1_XTAL_SEL 72
+#define CLK_TOP_SYSAXI_SEL 73
+#define CLK_TOP_SYSAPB_SEL 74
+#define CLK_TOP_ETH_REFCK_50M_SEL 75
+#define CLK_TOP_ETH_SYS_200M_SEL 76
+#define CLK_TOP_ETH_SYS_SEL 77
+#define CLK_TOP_ETH_XGMII_SEL 78
+#define CLK_TOP_BUS_TOPS_SEL 79
+#define CLK_TOP_NPU_TOPS_SEL 80
+#define CLK_TOP_DRAMC_SEL 81
+#define CLK_TOP_DRAMC_MD32_SEL 82
+#define CLK_TOP_INFRA_F26M_SEL 83
+#define CLK_TOP_PEXTP_P0_SEL 84
+#define CLK_TOP_PEXTP_P1_SEL 85
+#define CLK_TOP_PEXTP_P2_SEL 86
+#define CLK_TOP_PEXTP_P3_SEL 87
+#define CLK_TOP_DA_XTP_GLB_P0_SEL 88
+#define CLK_TOP_DA_XTP_GLB_P1_SEL 89
+#define CLK_TOP_DA_XTP_GLB_P2_SEL 90
+#define CLK_TOP_DA_XTP_GLB_P3_SEL 91
+#define CLK_TOP_CKM_SEL 92
+#define CLK_TOP_DA_SEL 93
+#define CLK_TOP_PEXTP_SEL 94
+#define CLK_TOP_TOPS_P2_26M_SEL 95
+#define CLK_TOP_MCUSYS_BACKUP_625M_SEL 96
+#define CLK_TOP_NETSYS_SYNC_250M_SEL 97
+#define CLK_TOP_MACSEC_SEL 98
+#define CLK_TOP_NETSYS_TOPS_400M_SEL 99
+#define CLK_TOP_NETSYS_PPEFB_250M_SEL 100
+#define CLK_TOP_NETSYS_WARP_SEL 101
+#define CLK_TOP_ETH_MII_SEL 102
+#define CLK_TOP_NPU_SEL 103
+#define CLK_TOP_AUD_I2S_M 104
+
+/* MCUSYS */
+
+#define CLK_MCU_BUS_DIV_SEL 0
+#define CLK_MCU_ARM_DIV_SEL 1
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_MUX_UART0_SEL 0
+#define CLK_INFRA_MUX_UART1_SEL 1
+#define CLK_INFRA_MUX_UART2_SEL 2
+#define CLK_INFRA_MUX_SPI0_SEL 3
+#define CLK_INFRA_MUX_SPI1_SEL 4
+#define CLK_INFRA_MUX_SPI2_SEL 5
+#define CLK_INFRA_PWM_SEL 6
+#define CLK_INFRA_PWM_CK1_SEL 7
+#define CLK_INFRA_PWM_CK2_SEL 8
+#define CLK_INFRA_PWM_CK3_SEL 9
+#define CLK_INFRA_PWM_CK4_SEL 10
+#define CLK_INFRA_PWM_CK5_SEL 11
+#define CLK_INFRA_PWM_CK6_SEL 12
+#define CLK_INFRA_PWM_CK7_SEL 13
+#define CLK_INFRA_PWM_CK8_SEL 14
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P0_SEL 15
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P1_SEL 16
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P2_SEL 17
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P3_SEL 18
+
+/* INFRACFG */
+
+#define CLK_INFRA_PCIE_PERI_26M_CK_P0 19
+#define CLK_INFRA_PCIE_PERI_26M_CK_P1 20
+#define CLK_INFRA_PCIE_PERI_26M_CK_P2 21
+#define CLK_INFRA_PCIE_PERI_26M_CK_P3 22
+#define CLK_INFRA_66M_GPT_BCK 23
+#define CLK_INFRA_66M_PWM_HCK 24
+#define CLK_INFRA_66M_PWM_BCK 25
+#define CLK_INFRA_66M_PWM_CK1 26
+#define CLK_INFRA_66M_PWM_CK2 27
+#define CLK_INFRA_66M_PWM_CK3 28
+#define CLK_INFRA_66M_PWM_CK4 29
+#define CLK_INFRA_66M_PWM_CK5 30
+#define CLK_INFRA_66M_PWM_CK6 31
+#define CLK_INFRA_66M_PWM_CK7 32
+#define CLK_INFRA_66M_PWM_CK8 33
+#define CLK_INFRA_133M_CQDMA_BCK 34
+#define CLK_INFRA_66M_AUD_SLV_BCK 35
+#define CLK_INFRA_AUD_26M 36
+#define CLK_INFRA_AUD_L 37
+#define CLK_INFRA_AUD_AUD 38
+#define CLK_INFRA_AUD_EG2 39
+#define CLK_INFRA_DRAMC_F26M 40
+#define CLK_INFRA_133M_DBG_ACKM 41
+#define CLK_INFRA_66M_AP_DMA_BCK 42
+#define CLK_INFRA_66M_SEJ_BCK 43
+#define CLK_INFRA_PRE_CK_SEJ_F13M 44
+#define CLK_INFRA_26M_THERM_SYSTEM 45
+#define CLK_INFRA_I2C_BCK 46
+#define CLK_INFRA_52M_UART0_CK 47
+#define CLK_INFRA_52M_UART1_CK 48
+#define CLK_INFRA_52M_UART2_CK 49
+#define CLK_INFRA_NFI 50
+#define CLK_INFRA_SPINFI 51
+#define CLK_INFRA_66M_NFI_HCK 52
+#define CLK_INFRA_104M_SPI0 53
+#define CLK_INFRA_104M_SPI1 54
+#define CLK_INFRA_104M_SPI2_BCK 55
+#define CLK_INFRA_66M_SPI0_HCK 56
+#define CLK_INFRA_66M_SPI1_HCK 57
+#define CLK_INFRA_66M_SPI2_HCK 58
+#define CLK_INFRA_66M_FLASHIF_AXI 59
+#define CLK_INFRA_RTC 60
+#define CLK_INFRA_26M_ADC_BCK 61
+#define CLK_INFRA_RC_ADC 62
+#define CLK_INFRA_MSDC400 63
+#define CLK_INFRA_MSDC2_HCK 64
+#define CLK_INFRA_133M_MSDC_0_HCK 65
+#define CLK_INFRA_66M_MSDC_0_HCK 66
+#define CLK_INFRA_133M_CPUM_BCK 67
+#define CLK_INFRA_BIST2FPC 68
+#define CLK_INFRA_I2C_X16W_MCK_CK_P1 69
+#define CLK_INFRA_I2C_X16W_PCK_CK_P1 70
+#define CLK_INFRA_133M_USB_HCK 71
+#define CLK_INFRA_133M_USB_HCK_CK_P1 72
+#define CLK_INFRA_66M_USB_HCK 73
+#define CLK_INFRA_66M_USB_HCK_CK_P1 74
+#define CLK_INFRA_USB_SYS 75
+#define CLK_INFRA_USB_SYS_CK_P1 76
+#define CLK_INFRA_USB_REF 77
+#define CLK_INFRA_USB_CK_P1 78
+#define CLK_INFRA_USB_FRMCNT 79
+#define CLK_INFRA_USB_FRMCNT_CK_P1 80
+#define CLK_INFRA_USB_PIPE 81
+#define CLK_INFRA_USB_PIPE_CK_P1 82
+#define CLK_INFRA_USB_UTMI 83
+#define CLK_INFRA_USB_UTMI_CK_P1 84
+#define CLK_INFRA_USB_XHCI 85
+#define CLK_INFRA_USB_XHCI_CK_P1 86
+#define CLK_INFRA_PCIE_GFMUX_TL_P0 87
+#define CLK_INFRA_PCIE_GFMUX_TL_P1 88
+#define CLK_INFRA_PCIE_GFMUX_TL_P2 89
+#define CLK_INFRA_PCIE_GFMUX_TL_P3 90
+#define CLK_INFRA_PCIE_PIPE_P0 91
+#define CLK_INFRA_PCIE_PIPE_P1 92
+#define CLK_INFRA_PCIE_PIPE_P2 93
+#define CLK_INFRA_PCIE_PIPE_P3 94
+#define CLK_INFRA_133M_PCIE_CK_P0 95
+#define CLK_INFRA_133M_PCIE_CK_P1 96
+#define CLK_INFRA_133M_PCIE_CK_P2 97
+#define CLK_INFRA_133M_PCIE_CK_P3 98
+
+/* ETHDMA */
+
+#define CLK_ETHDMA_XGP1_EN 0
+#define CLK_ETHDMA_XGP2_EN 1
+#define CLK_ETHDMA_XGP3_EN 2
+#define CLK_ETHDMA_FE_EN 3
+#define CLK_ETHDMA_GP2_EN 4
+#define CLK_ETHDMA_GP1_EN 5
+#define CLK_ETHDMA_GP3_EN 6
+#define CLK_ETHDMA_ESW_EN 7
+#define CLK_ETHDMA_CRYPT0_EN 8
+#define CLK_ETHDMA_NR_CLK 9
+
+/* SGMIISYS_0 */
+
+#define CLK_SGM0_TX_EN 0
+#define CLK_SGM0_RX_EN 1
+#define CLK_SGMII0_NR_CLK 2
+
+/* SGMIISYS_1 */
+
+#define CLK_SGM1_TX_EN 0
+#define CLK_SGM1_RX_EN 1
+#define CLK_SGMII1_NR_CLK 2
+
+/* ETHWARP */
+
+#define CLK_ETHWARP_WOCPU2_EN 0
+#define CLK_ETHWARP_WOCPU1_EN 1
+#define CLK_ETHWARP_WOCPU0_EN 2
+#define CLK_ETHWARP_NR_CLK 3
+
+/* XFIPLL */
+#define CLK_XFIPLL_PLL 0
+#define CLK_XFIPLL_PLL_EN 1
+
+#endif /* _DT_BINDINGS_CLK_MT7988_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
index 2d545ed0d35a..9a9bc55b49af 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8939.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -193,6 +193,12 @@
 #define GCC_VENUS0_CORE1_VCODEC0_CLK 184
 #define GCC_OXILI_TIMER_CLK 185
 #define SYSTEM_MM_NOC_BFDCD_CLK_SRC 186
+#define CSI2_CLK_SRC 187
+#define GCC_CAMSS_CSI2_AHB_CLK 188
+#define GCC_CAMSS_CSI2_CLK 189
+#define GCC_CAMSS_CSI2PHY_CLK 190
+#define GCC_CAMSS_CSI2PIX_CLK 191
+#define GCC_CAMSS_CSI2RDI_CLK 192
 
 /* Indexes for GDSCs */
 #define BIMC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
index a62cb0629a7a..743ee60632eb 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
@@ -121,7 +121,6 @@
 #define MMSS_MMSSNOC_BTO_AHB_CLK 112
 #define MMSS_MMSSNOC_AXI_CLK 113
 #define MMSS_S0_AXI_CLK 114
-#define OCMEMCX_AHB_CLK 115
 #define OCMEMCX_OCMEMNOC_CLK 116
 #define OXILI_OCMEMGX_CLK 117
 #define OCMEMNOC_CLK 118
diff --git a/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
new file mode 100644
index 000000000000..731e404a2ce6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+#define _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+
+/* ECPRI_CC clocks */
+#define ECPRI_CC_PLL0 0
+#define ECPRI_CC_PLL1 1
+#define ECPRI_CC_ECPRI_CG_CLK 2
+#define ECPRI_CC_ECPRI_CLK_SRC 3
+#define ECPRI_CC_ECPRI_DMA_CLK 4
+#define ECPRI_CC_ECPRI_DMA_CLK_SRC 5
+#define ECPRI_CC_ECPRI_DMA_NOC_CLK 6
+#define ECPRI_CC_ECPRI_FAST_CLK 7
+#define ECPRI_CC_ECPRI_FAST_CLK_SRC 8
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK 9
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK_SRC 10
+#define ECPRI_CC_ECPRI_FAST_DIV2_NOC_CLK 11
+#define ECPRI_CC_ECPRI_FR_CLK 12
+#define ECPRI_CC_ECPRI_ORAN_CLK_SRC 13
+#define ECPRI_CC_ECPRI_ORAN_DIV2_CLK 14
+#define ECPRI_CC_ETH_100G_C2C0_HM_FF_CLK_SRC 15
+#define ECPRI_CC_ETH_100G_C2C0_UDP_FIFO_CLK 16
+#define ECPRI_CC_ETH_100G_C2C1_UDP_FIFO_CLK 17
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_0_CLK 18
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_1_CLK 19
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_0_DIV_CLK_SRC 20
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_1_DIV_CLK_SRC 21
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK 22
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK_SRC 23
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_CLK 24
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_DIV_CLK_SRC 25
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_CLK 26
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_DIV_CLK_SRC 27
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_CLK_SRC 28
+#define ECPRI_CC_ETH_100G_DBG_C2C_UDP_FIFO_CLK 29
+#define ECPRI_CC_ETH_100G_FH0_HM_FF_CLK_SRC 30
+#define ECPRI_CC_ETH_100G_FH0_MACSEC_CLK_SRC 31
+#define ECPRI_CC_ETH_100G_FH1_HM_FF_CLK_SRC 32
+#define ECPRI_CC_ETH_100G_FH1_MACSEC_CLK_SRC 33
+#define ECPRI_CC_ETH_100G_FH2_HM_FF_CLK_SRC 34
+#define ECPRI_CC_ETH_100G_FH2_MACSEC_CLK_SRC 35
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_CLK 36
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_DIV_CLK_SRC 37
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_CLK 38
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_DIV_CLK_SRC 39
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_CLK 40
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_DIV_CLK_SRC 41
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_CLK 42
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_DIV_CLK_SRC 43
+#define ECPRI_CC_ETH_100G_FH_0_UDP_FIFO_CLK 44
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_CLK 45
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_DIV_CLK_SRC 46
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_CLK 47
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_DIV_CLK_SRC 48
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_CLK 49
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_DIV_CLK_SRC 50
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_CLK 51
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_DIV_CLK_SRC 52
+#define ECPRI_CC_ETH_100G_FH_1_UDP_FIFO_CLK 53
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_CLK 54
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_DIV_CLK_SRC 55
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_CLK 56
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_DIV_CLK_SRC 57
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_CLK 58
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_DIV_CLK_SRC 59
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_CLK 60
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_DIV_CLK_SRC 61
+#define ECPRI_CC_ETH_100G_FH_2_UDP_FIFO_CLK 62
+#define ECPRI_CC_ETH_100G_FH_MACSEC_0_CLK 63
+#define ECPRI_CC_ETH_100G_FH_MACSEC_1_CLK 64
+#define ECPRI_CC_ETH_100G_FH_MACSEC_2_CLK 65
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK 66
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK_SRC 67
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK 68
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK_SRC 69
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK 70
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK_SRC 71
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK 72
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK_SRC 73
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK 74
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK_SRC 75
+#define ECPRI_CC_ETH_DBG_NFAPI_AXI_CLK 76
+#define ECPRI_CC_ETH_DBG_NOC_AXI_CLK 77
+#define ECPRI_CC_ETH_PHY_0_OCK_SRAM_CLK 78
+#define ECPRI_CC_ETH_PHY_1_OCK_SRAM_CLK 79
+#define ECPRI_CC_ETH_PHY_2_OCK_SRAM_CLK 80
+#define ECPRI_CC_ETH_PHY_3_OCK_SRAM_CLK 81
+#define ECPRI_CC_ETH_PHY_4_OCK_SRAM_CLK 82
+#define ECPRI_CC_MSS_EMAC_CLK 83
+#define ECPRI_CC_MSS_EMAC_CLK_SRC 84
+#define ECPRI_CC_MSS_ORAN_CLK 85
+#define ECPRI_CC_PHY0_LANE0_RX_CLK 86
+#define ECPRI_CC_PHY0_LANE0_TX_CLK 87
+#define ECPRI_CC_PHY0_LANE1_RX_CLK 88
+#define ECPRI_CC_PHY0_LANE1_TX_CLK 89
+#define ECPRI_CC_PHY0_LANE2_RX_CLK 90
+#define ECPRI_CC_PHY0_LANE2_TX_CLK 91
+#define ECPRI_CC_PHY0_LANE3_RX_CLK 92
+#define ECPRI_CC_PHY0_LANE3_TX_CLK 93
+#define ECPRI_CC_PHY1_LANE0_RX_CLK 94
+#define ECPRI_CC_PHY1_LANE0_TX_CLK 95
+#define ECPRI_CC_PHY1_LANE1_RX_CLK 96
+#define ECPRI_CC_PHY1_LANE1_TX_CLK 97
+#define ECPRI_CC_PHY1_LANE2_RX_CLK 98
+#define ECPRI_CC_PHY1_LANE2_TX_CLK 99
+#define ECPRI_CC_PHY1_LANE3_RX_CLK 100
+#define ECPRI_CC_PHY1_LANE3_TX_CLK 101
+#define ECPRI_CC_PHY2_LANE0_RX_CLK 102
+#define ECPRI_CC_PHY2_LANE0_TX_CLK 103
+#define ECPRI_CC_PHY2_LANE1_RX_CLK 104
+#define ECPRI_CC_PHY2_LANE1_TX_CLK 105
+#define ECPRI_CC_PHY2_LANE2_RX_CLK 106
+#define ECPRI_CC_PHY2_LANE2_TX_CLK 107
+#define ECPRI_CC_PHY2_LANE3_RX_CLK 108
+#define ECPRI_CC_PHY2_LANE3_TX_CLK 109
+#define ECPRI_CC_PHY3_LANE0_RX_CLK 110
+#define ECPRI_CC_PHY3_LANE0_TX_CLK 111
+#define ECPRI_CC_PHY3_LANE1_RX_CLK 112
+#define ECPRI_CC_PHY3_LANE1_TX_CLK 113
+#define ECPRI_CC_PHY3_LANE2_RX_CLK 114
+#define ECPRI_CC_PHY3_LANE2_TX_CLK 115
+#define ECPRI_CC_PHY3_LANE3_RX_CLK 116
+#define ECPRI_CC_PHY3_LANE3_TX_CLK 117
+#define ECPRI_CC_PHY4_LANE0_RX_CLK 118
+#define ECPRI_CC_PHY4_LANE0_TX_CLK 119
+#define ECPRI_CC_PHY4_LANE1_RX_CLK 120
+#define ECPRI_CC_PHY4_LANE1_TX_CLK 121
+#define ECPRI_CC_PHY4_LANE2_RX_CLK 122
+#define ECPRI_CC_PHY4_LANE2_TX_CLK 123
+#define ECPRI_CC_PHY4_LANE3_RX_CLK 124
+#define ECPRI_CC_PHY4_LANE3_TX_CLK 125
+
+/* ECPRI_CC resets */
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ECPRI_SS_BCR 0
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_C2C_BCR 1
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH0_BCR 2
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH1_BCR 3
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH2_BCR 4
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_WRAPPER_TOP_BCR 5
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_MODEM_BCR 6
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_NOC_BCR 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sc8280xp-camcc.h b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
new file mode 100644
index 000000000000..ea5ec73c8c6a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+#define __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+
+/* CAMCC clocks */
+#define CAMCC_PLL0 0
+#define CAMCC_PLL0_OUT_EVEN 1
+#define CAMCC_PLL0_OUT_ODD 2
+#define CAMCC_PLL1 3
+#define CAMCC_PLL1_OUT_EVEN 4
+#define CAMCC_PLL2 5
+#define CAMCC_PLL3 6
+#define CAMCC_PLL3_OUT_EVEN 7
+#define CAMCC_PLL4 8
+#define CAMCC_PLL4_OUT_EVEN 9
+#define CAMCC_PLL5 10
+#define CAMCC_PLL5_OUT_EVEN 11
+#define CAMCC_PLL6 12
+#define CAMCC_PLL6_OUT_EVEN 13
+#define CAMCC_PLL7 14
+#define CAMCC_PLL7_OUT_EVEN 15
+#define CAMCC_PLL7_OUT_ODD 16
+#define CAMCC_BPS_AHB_CLK 17
+#define CAMCC_BPS_AREG_CLK 18
+#define CAMCC_BPS_AXI_CLK 19
+#define CAMCC_BPS_CLK 20
+#define CAMCC_BPS_CLK_SRC 21
+#define CAMCC_CAMNOC_AXI_CLK 22
+#define CAMCC_CAMNOC_AXI_CLK_SRC 23
+#define CAMCC_CAMNOC_DCD_XO_CLK 24
+#define CAMCC_CCI_0_CLK 25
+#define CAMCC_CCI_0_CLK_SRC 26
+#define CAMCC_CCI_1_CLK 27
+#define CAMCC_CCI_1_CLK_SRC 28
+#define CAMCC_CCI_2_CLK 29
+#define CAMCC_CCI_2_CLK_SRC 30
+#define CAMCC_CCI_3_CLK 31
+#define CAMCC_CCI_3_CLK_SRC 32
+#define CAMCC_CORE_AHB_CLK 33
+#define CAMCC_CPAS_AHB_CLK 34
+#define CAMCC_CPHY_RX_CLK_SRC 35
+#define CAMCC_CSI0PHYTIMER_CLK 36
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 37
+#define CAMCC_CSI1PHYTIMER_CLK 38
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 39
+#define CAMCC_CSI2PHYTIMER_CLK 40
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 41
+#define CAMCC_CSI3PHYTIMER_CLK 42
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 43
+#define CAMCC_CSIPHY0_CLK 44
+#define CAMCC_CSIPHY1_CLK 45
+#define CAMCC_CSIPHY2_CLK 46
+#define CAMCC_CSIPHY3_CLK 47
+#define CAMCC_FAST_AHB_CLK_SRC 48
+#define CAMCC_GDSC_CLK 49
+#define CAMCC_ICP_AHB_CLK 50
+#define CAMCC_ICP_CLK 51
+#define CAMCC_ICP_CLK_SRC 52
+#define CAMCC_IFE_0_AXI_CLK 53
+#define CAMCC_IFE_0_CLK 54
+#define CAMCC_IFE_0_CLK_SRC 55
+#define CAMCC_IFE_0_CPHY_RX_CLK 56
+#define CAMCC_IFE_0_CSID_CLK 57
+#define CAMCC_IFE_0_CSID_CLK_SRC 58
+#define CAMCC_IFE_0_DSP_CLK 59
+#define CAMCC_IFE_1_AXI_CLK 60
+#define CAMCC_IFE_1_CLK 61
+#define CAMCC_IFE_1_CLK_SRC 62
+#define CAMCC_IFE_1_CPHY_RX_CLK 63
+#define CAMCC_IFE_1_CSID_CLK 64
+#define CAMCC_IFE_1_CSID_CLK_SRC 65
+#define CAMCC_IFE_1_DSP_CLK 66
+#define CAMCC_IFE_2_AXI_CLK 67
+#define CAMCC_IFE_2_CLK 68
+#define CAMCC_IFE_2_CLK_SRC 69
+#define CAMCC_IFE_2_CPHY_RX_CLK 70
+#define CAMCC_IFE_2_CSID_CLK 71
+#define CAMCC_IFE_2_CSID_CLK_SRC 72
+#define CAMCC_IFE_2_DSP_CLK 73
+#define CAMCC_IFE_3_AXI_CLK 74
+#define CAMCC_IFE_3_CLK 75
+#define CAMCC_IFE_3_CLK_SRC 76
+#define CAMCC_IFE_3_CPHY_RX_CLK 77
+#define CAMCC_IFE_3_CSID_CLK 78
+#define CAMCC_IFE_3_CSID_CLK_SRC 79
+#define CAMCC_IFE_3_DSP_CLK 80
+#define CAMCC_IFE_LITE_0_CLK 81
+#define CAMCC_IFE_LITE_0_CLK_SRC 82
+#define CAMCC_IFE_LITE_0_CPHY_RX_CLK 83
+#define CAMCC_IFE_LITE_0_CSID_CLK 84
+#define CAMCC_IFE_LITE_0_CSID_CLK_SRC 85
+#define CAMCC_IFE_LITE_1_CLK 86
+#define CAMCC_IFE_LITE_1_CLK_SRC 87
+#define CAMCC_IFE_LITE_1_CPHY_RX_CLK 88
+#define CAMCC_IFE_LITE_1_CSID_CLK 89
+#define CAMCC_IFE_LITE_1_CSID_CLK_SRC 90
+#define CAMCC_IFE_LITE_2_CLK 91
+#define CAMCC_IFE_LITE_2_CLK_SRC 92
+#define CAMCC_IFE_LITE_2_CPHY_RX_CLK 93
+#define CAMCC_IFE_LITE_2_CSID_CLK 94
+#define CAMCC_IFE_LITE_2_CSID_CLK_SRC 95
+#define CAMCC_IFE_LITE_3_CLK 96
+#define CAMCC_IFE_LITE_3_CLK_SRC 97
+#define CAMCC_IFE_LITE_3_CPHY_RX_CLK 98
+#define CAMCC_IFE_LITE_3_CSID_CLK 99
+#define CAMCC_IFE_LITE_3_CSID_CLK_SRC 100
+#define CAMCC_IPE_0_AHB_CLK 101
+#define CAMCC_IPE_0_AREG_CLK 102
+#define CAMCC_IPE_0_AXI_CLK 103
+#define CAMCC_IPE_0_CLK 104
+#define CAMCC_IPE_0_CLK_SRC 105
+#define CAMCC_IPE_1_AHB_CLK 106
+#define CAMCC_IPE_1_AREG_CLK 107
+#define CAMCC_IPE_1_AXI_CLK 108
+#define CAMCC_IPE_1_CLK 109
+#define CAMCC_JPEG_CLK 110
+#define CAMCC_JPEG_CLK_SRC 111
+#define CAMCC_LRME_CLK 112
+#define CAMCC_LRME_CLK_SRC 113
+#define CAMCC_MCLK0_CLK 114
+#define CAMCC_MCLK0_CLK_SRC 115
+#define CAMCC_MCLK1_CLK 116
+#define CAMCC_MCLK1_CLK_SRC 117
+#define CAMCC_MCLK2_CLK 118
+#define CAMCC_MCLK2_CLK_SRC 119
+#define CAMCC_MCLK3_CLK 120
+#define CAMCC_MCLK3_CLK_SRC 121
+#define CAMCC_MCLK4_CLK 122
+#define CAMCC_MCLK4_CLK_SRC 123
+#define CAMCC_MCLK5_CLK 124
+#define CAMCC_MCLK5_CLK_SRC 125
+#define CAMCC_MCLK6_CLK 126
+#define CAMCC_MCLK6_CLK_SRC 127
+#define CAMCC_MCLK7_CLK 128
+#define CAMCC_MCLK7_CLK_SRC 129
+#define CAMCC_SLEEP_CLK 130
+#define CAMCC_SLEEP_CLK_SRC 131
+#define CAMCC_SLOW_AHB_CLK_SRC 132
+#define CAMCC_XO_CLK_SRC 133
+
+/* CAMCC resets */
+#define CAMCC_BPS_BCR 0
+#define CAMCC_CAMNOC_BCR 1
+#define CAMCC_CCI_BCR 2
+#define CAMCC_CPAS_BCR 3
+#define CAMCC_CSI0PHY_BCR 4
+#define CAMCC_CSI1PHY_BCR 5
+#define CAMCC_CSI2PHY_BCR 6
+#define CAMCC_CSI3PHY_BCR 7
+#define CAMCC_ICP_BCR 8
+#define CAMCC_IFE_0_BCR 9
+#define CAMCC_IFE_1_BCR 10
+#define CAMCC_IFE_2_BCR 11
+#define CAMCC_IFE_3_BCR 12
+#define CAMCC_IFE_LITE_0_BCR 13
+#define CAMCC_IFE_LITE_1_BCR 14
+#define CAMCC_IFE_LITE_2_BCR 15
+#define CAMCC_IFE_LITE_3_BCR 16
+#define CAMCC_IPE_0_BCR 17
+#define CAMCC_IPE_1_BCR 18
+#define CAMCC_JPEG_BCR 19
+#define CAMCC_LRME_BCR 20
+
+/* CAMCC GDSCRs */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IFE_2_GDSC 3
+#define IFE_3_GDSC 4
+#define IPE_0_GDSC 5
+#define IPE_1_GDSC 6
+#define TITAN_TOP_GDSC 7
+
+#endif /* __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__ */
diff --git a/include/dt-bindings/clock/qcom,sm4450-gcc.h b/include/dt-bindings/clock/qcom,sm4450-gcc.h
new file mode 100644
index 000000000000..c18e47a86f40
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-gcc.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_SLEEP_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_EUSB3_0_CLKREF_EN 17
+#define GCC_GP1_CLK 18
+#define GCC_GP1_CLK_SRC 19
+#define GCC_GP2_CLK 20
+#define GCC_GP2_CLK_SRC 21
+#define GCC_GP3_CLK 22
+#define GCC_GP3_CLK_SRC 23
+#define GCC_GPLL0 24
+#define GCC_GPLL0_OUT_EVEN 25
+#define GCC_GPLL0_OUT_ODD 26
+#define GCC_GPLL1 27
+#define GCC_GPLL3 28
+#define GCC_GPLL4 29
+#define GCC_GPLL9 30
+#define GCC_GPLL10 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_CLK 37
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_CLK 38
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_CLK 39
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_CLK 40
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF0_CLK 41
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF1_CLK 42
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_SF0_CLK 43
+#define GCC_HLOS1_VOTE_MMU_TCU_CLK 44
+#define GCC_PCIE_0_AUX_CLK 45
+#define GCC_PCIE_0_AUX_CLK_SRC 46
+#define GCC_PCIE_0_CFG_AHB_CLK 47
+#define GCC_PCIE_0_CLKREF_EN 48
+#define GCC_PCIE_0_MSTR_AXI_CLK 49
+#define GCC_PCIE_0_PHY_RCHNG_CLK 50
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_0_PIPE_CLK 52
+#define GCC_PCIE_0_PIPE_CLK_SRC 53
+#define GCC_PCIE_0_PIPE_DIV2_CLK 54
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 55
+#define GCC_PCIE_0_SLV_AXI_CLK 56
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_CLK 69
+#define GCC_QUPV3_WRAP0_S0_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S1_CLK 72
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S2_CLK 74
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S3_CLK 76
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S4_CLK 78
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 80
+#define GCC_QUPV3_WRAP1_CORE_CLK 81
+#define GCC_QUPV3_WRAP1_S0_CLK 82
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S1_CLK 84
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S2_CLK 86
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S3_CLK 88
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S4_CLK 90
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 91
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95
+#define GCC_SDCC1_AHB_CLK 96
+#define GCC_SDCC1_APPS_CLK 97
+#define GCC_SDCC1_APPS_CLK_SRC 98
+#define GCC_SDCC1_ICE_CORE_CLK 99
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 100
+#define GCC_SDCC2_AHB_CLK 101
+#define GCC_SDCC2_APPS_CLK 102
+#define GCC_SDCC2_APPS_CLK_SRC 103
+#define GCC_UFS_0_CLKREF_EN 104
+#define GCC_UFS_PAD_CLKREF_EN 105
+#define GCC_UFS_PHY_AHB_CLK 106
+#define GCC_UFS_PHY_AXI_CLK 107
+#define GCC_UFS_PHY_AXI_CLK_SRC 108
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 109
+#define GCC_UFS_PHY_ICE_CORE_CLK 110
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 111
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 112
+#define GCC_UFS_PHY_PHY_AUX_CLK 113
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 114
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 115
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 116
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 117
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 118
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 119
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 121
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 122
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 123
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 124
+#define GCC_USB30_PRIM_MASTER_CLK 125
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 126
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 127
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 128
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 129
+#define GCC_USB30_PRIM_SLEEP_CLK 130
+#define GCC_USB3_0_CLKREF_EN 131
+#define GCC_USB3_PRIM_PHY_AUX_CLK 132
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 133
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 134
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 135
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 136
+#define GCC_VCODEC0_AXI_CLK 137
+#define GCC_VENUS_CTL_AXI_CLK 138
+#define GCC_VIDEO_AHB_CLK 139
+#define GCC_VIDEO_THROTTLE_CORE_CLK 140
+#define GCC_VIDEO_VCODEC0_SYS_CLK 141
+#define GCC_VIDEO_VENUS_CLK_SRC 142
+#define GCC_VIDEO_VENUS_CTL_CLK 143
+#define GCC_VIDEO_XO_CLK 144
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_UFS_PHY_GDSC 1
+#define GCC_USB30_PRIM_GDSC 2
+#define GCC_VCODEC0_GDSC 3
+#define GCC_VENUS_GDSC 4
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_PHY_BCR 8
+#define GCC_PCIE_PHY_CFG_AHB_BCR 9
+#define GCC_PCIE_PHY_COM_BCR 10
+#define GCC_PDM_BCR 11
+#define GCC_QUPV3_WRAPPER_0_BCR 12
+#define GCC_QUPV3_WRAPPER_1_BCR 13
+#define GCC_QUSB2PHY_PRIM_BCR 14
+#define GCC_QUSB2PHY_SEC_BCR 15
+#define GCC_SDCC1_BCR 16
+#define GCC_SDCC2_BCR 17
+#define GCC_UFS_PHY_BCR 18
+#define GCC_USB30_PRIM_BCR 19
+#define GCC_USB3_DP_PHY_PRIM_BCR 20
+#define GCC_USB3_DP_PHY_SEC_BCR 21
+#define GCC_USB3_PHY_PRIM_BCR 22
+#define GCC_USB3_PHY_SEC_BCR 23
+#define GCC_USB3PHY_PHY_PRIM_BCR 24
+#define GCC_USB3PHY_PHY_SEC_BCR 25
+#define GCC_VCODEC0_BCR 26
+#define GCC_VENUS_BCR 27
+#define GCC_VIDEO_BCR 28
+#define GCC_VIDEO_VENUS_BCR 29
+#define GCC_VENUS_CTL_AXI_CLK_ARES 30
+#define GCC_VIDEO_VENUS_CTL_CLK_ARES 31
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8550-camcc.h b/include/dt-bindings/clock/qcom,sm8550-camcc.h
new file mode 100644
index 000000000000..a2a256691c2b
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-camcc.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 5
+#define CAM_CC_CAMNOC_DCD_XO_CLK 6
+#define CAM_CC_CAMNOC_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CCI_2_CLK 12
+#define CAM_CC_CCI_2_CLK_SRC 13
+#define CAM_CC_CORE_AHB_CLK 14
+#define CAM_CC_CPAS_AHB_CLK 15
+#define CAM_CC_CPAS_BPS_CLK 16
+#define CAM_CC_CPAS_CRE_CLK 17
+#define CAM_CC_CPAS_FAST_AHB_CLK 18
+#define CAM_CC_CPAS_IFE_0_CLK 19
+#define CAM_CC_CPAS_IFE_1_CLK 20
+#define CAM_CC_CPAS_IFE_2_CLK 21
+#define CAM_CC_CPAS_IFE_LITE_CLK 22
+#define CAM_CC_CPAS_IPE_NPS_CLK 23
+#define CAM_CC_CPAS_SBI_CLK 24
+#define CAM_CC_CPAS_SFE_0_CLK 25
+#define CAM_CC_CPAS_SFE_1_CLK 26
+#define CAM_CC_CPHY_RX_CLK_SRC 27
+#define CAM_CC_CRE_AHB_CLK 28
+#define CAM_CC_CRE_CLK 29
+#define CAM_CC_CRE_CLK_SRC 30
+#define CAM_CC_CSI0PHYTIMER_CLK 31
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 32
+#define CAM_CC_CSI1PHYTIMER_CLK 33
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 34
+#define CAM_CC_CSI2PHYTIMER_CLK 35
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 36
+#define CAM_CC_CSI3PHYTIMER_CLK 37
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 38
+#define CAM_CC_CSI4PHYTIMER_CLK 39
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 40
+#define CAM_CC_CSI5PHYTIMER_CLK 41
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 42
+#define CAM_CC_CSI6PHYTIMER_CLK 43
+#define CAM_CC_CSI6PHYTIMER_CLK_SRC 44
+#define CAM_CC_CSI7PHYTIMER_CLK 45
+#define CAM_CC_CSI7PHYTIMER_CLK_SRC 46
+#define CAM_CC_CSID_CLK 47
+#define CAM_CC_CSID_CLK_SRC 48
+#define CAM_CC_CSID_CSIPHY_RX_CLK 49
+#define CAM_CC_CSIPHY0_CLK 50
+#define CAM_CC_CSIPHY1_CLK 51
+#define CAM_CC_CSIPHY2_CLK 52
+#define CAM_CC_CSIPHY3_CLK 53
+#define CAM_CC_CSIPHY4_CLK 54
+#define CAM_CC_CSIPHY5_CLK 55
+#define CAM_CC_CSIPHY6_CLK 56
+#define CAM_CC_CSIPHY7_CLK 57
+#define CAM_CC_DRV_AHB_CLK 58
+#define CAM_CC_DRV_XO_CLK 59
+#define CAM_CC_FAST_AHB_CLK_SRC 60
+#define CAM_CC_GDSC_CLK 61
+#define CAM_CC_ICP_AHB_CLK 62
+#define CAM_CC_ICP_CLK 63
+#define CAM_CC_ICP_CLK_SRC 64
+#define CAM_CC_IFE_0_CLK 65
+#define CAM_CC_IFE_0_CLK_SRC 66
+#define CAM_CC_IFE_0_DSP_CLK 67
+#define CAM_CC_IFE_0_DSP_CLK_SRC 68
+#define CAM_CC_IFE_0_FAST_AHB_CLK 69
+#define CAM_CC_IFE_1_CLK 70
+#define CAM_CC_IFE_1_CLK_SRC 71
+#define CAM_CC_IFE_1_DSP_CLK 72
+#define CAM_CC_IFE_1_DSP_CLK_SRC 73
+#define CAM_CC_IFE_1_FAST_AHB_CLK 74
+#define CAM_CC_IFE_2_CLK 75
+#define CAM_CC_IFE_2_CLK_SRC 76
+#define CAM_CC_IFE_2_DSP_CLK 77
+#define CAM_CC_IFE_2_DSP_CLK_SRC 78
+#define CAM_CC_IFE_2_FAST_AHB_CLK 79
+#define CAM_CC_IFE_LITE_AHB_CLK 80
+#define CAM_CC_IFE_LITE_CLK 81
+#define CAM_CC_IFE_LITE_CLK_SRC 82
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_CSID_CLK 84
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 85
+#define CAM_CC_IPE_NPS_AHB_CLK 86
+#define CAM_CC_IPE_NPS_CLK 87
+#define CAM_CC_IPE_NPS_CLK_SRC 88
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 89
+#define CAM_CC_IPE_PPS_CLK 90
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 91
+#define CAM_CC_JPEG_1_CLK 92
+#define CAM_CC_JPEG_CLK 93
+#define CAM_CC_JPEG_CLK_SRC 94
+#define CAM_CC_MCLK0_CLK 95
+#define CAM_CC_MCLK0_CLK_SRC 96
+#define CAM_CC_MCLK1_CLK 97
+#define CAM_CC_MCLK1_CLK_SRC 98
+#define CAM_CC_MCLK2_CLK 99
+#define CAM_CC_MCLK2_CLK_SRC 100
+#define CAM_CC_MCLK3_CLK 101
+#define CAM_CC_MCLK3_CLK_SRC 102
+#define CAM_CC_MCLK4_CLK 103
+#define CAM_CC_MCLK4_CLK_SRC 104
+#define CAM_CC_MCLK5_CLK 105
+#define CAM_CC_MCLK5_CLK_SRC 106
+#define CAM_CC_MCLK6_CLK 107
+#define CAM_CC_MCLK6_CLK_SRC 108
+#define CAM_CC_MCLK7_CLK 109
+#define CAM_CC_MCLK7_CLK_SRC 110
+#define CAM_CC_PLL0 111
+#define CAM_CC_PLL0_OUT_EVEN 112
+#define CAM_CC_PLL0_OUT_ODD 113
+#define CAM_CC_PLL1 114
+#define CAM_CC_PLL1_OUT_EVEN 115
+#define CAM_CC_PLL2 116
+#define CAM_CC_PLL3 117
+#define CAM_CC_PLL3_OUT_EVEN 118
+#define CAM_CC_PLL4 119
+#define CAM_CC_PLL4_OUT_EVEN 120
+#define CAM_CC_PLL5 121
+#define CAM_CC_PLL5_OUT_EVEN 122
+#define CAM_CC_PLL6 123
+#define CAM_CC_PLL6_OUT_EVEN 124
+#define CAM_CC_PLL7 125
+#define CAM_CC_PLL7_OUT_EVEN 126
+#define CAM_CC_PLL8 127
+#define CAM_CC_PLL8_OUT_EVEN 128
+#define CAM_CC_PLL9 129
+#define CAM_CC_PLL9_OUT_EVEN 130
+#define CAM_CC_PLL10 131
+#define CAM_CC_PLL10_OUT_EVEN 132
+#define CAM_CC_PLL11 133
+#define CAM_CC_PLL11_OUT_EVEN 134
+#define CAM_CC_PLL12 135
+#define CAM_CC_PLL12_OUT_EVEN 136
+#define CAM_CC_QDSS_DEBUG_CLK 137
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 138
+#define CAM_CC_QDSS_DEBUG_XO_CLK 139
+#define CAM_CC_SBI_CLK 140
+#define CAM_CC_SBI_FAST_AHB_CLK 141
+#define CAM_CC_SFE_0_CLK 142
+#define CAM_CC_SFE_0_CLK_SRC 143
+#define CAM_CC_SFE_0_FAST_AHB_CLK 144
+#define CAM_CC_SFE_1_CLK 145
+#define CAM_CC_SFE_1_CLK_SRC 146
+#define CAM_CC_SFE_1_FAST_AHB_CLK 147
+#define CAM_CC_SLEEP_CLK 148
+#define CAM_CC_SLEEP_CLK_SRC 149
+#define CAM_CC_SLOW_AHB_CLK_SRC 150
+#define CAM_CC_XO_CLK_SRC 151
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IFE_2_GDSC 3
+#define CAM_CC_IPE_0_GDSC 4
+#define CAM_CC_SBI_GDSC 5
+#define CAM_CC_SFE_0_GDSC 6
+#define CAM_CC_SFE_1_GDSC 7
+#define CAM_CC_TITAN_TOP_GDSC 8
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_DRV_BCR 1
+#define CAM_CC_ICP_BCR 2
+#define CAM_CC_IFE_0_BCR 3
+#define CAM_CC_IFE_1_BCR 4
+#define CAM_CC_IFE_2_BCR 5
+#define CAM_CC_IPE_0_BCR 6
+#define CAM_CC_QDSS_DEBUG_BCR 7
+#define CAM_CC_SBI_BCR 8
+#define CAM_CC_SFE_0_BCR 9
+#define CAM_CC_SFE_1_BCR 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-dispcc.h b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
new file mode 100644
index 000000000000..b0a668b395a5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_ACCU_CLK 0
+#define DISP_CC_MDSS_AHB1_CLK 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK 8
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
+#define DISP_CC_MDSS_ESC0_CLK 56
+#define DISP_CC_MDSS_ESC0_CLK_SRC 57
+#define DISP_CC_MDSS_ESC1_CLK 58
+#define DISP_CC_MDSS_ESC1_CLK_SRC 59
+#define DISP_CC_MDSS_MDP1_CLK 60
+#define DISP_CC_MDSS_MDP_CLK 61
+#define DISP_CC_MDSS_MDP_CLK_SRC 62
+#define DISP_CC_MDSS_MDP_LUT1_CLK 63
+#define DISP_CC_MDSS_MDP_LUT_CLK 64
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK 66
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
+#define DISP_CC_MDSS_PCLK1_CLK 68
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
+#define DISP_CC_MDSS_RSCC_AHB_CLK 70
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
+#define DISP_CC_MDSS_VSYNC1_CLK 72
+#define DISP_CC_MDSS_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
+#define DISP_CC_PLL0 75
+#define DISP_CC_PLL1 76
+#define DISP_CC_SLEEP_CLK 77
+#define DISP_CC_SLEEP_CLK_SRC 78
+#define DISP_CC_XO_CLK 79
+#define DISP_CC_XO_CLK_SRC 80
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-gcc.h b/include/dt-bindings/clock/qcom,sm8650-gcc.h
new file mode 100644
index 000000000000..0c543ba46079
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gcc.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 9
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 10
+#define GCC_CNOC_PCIE_SF_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_GP1_CLK 17
+#define GCC_GP1_CLK_SRC 18
+#define GCC_GP2_CLK 19
+#define GCC_GP2_CLK_SRC 20
+#define GCC_GP3_CLK 21
+#define GCC_GP3_CLK_SRC 22
+#define GCC_GPLL0 23
+#define GCC_GPLL0_OUT_EVEN 24
+#define GCC_GPLL1 25
+#define GCC_GPLL3 26
+#define GCC_GPLL4 27
+#define GCC_GPLL6 28
+#define GCC_GPLL7 29
+#define GCC_GPLL9 30
+#define GCC_GPU_CFG_AHB_CLK 31
+#define GCC_GPU_GPLL0_CLK_SRC 32
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 33
+#define GCC_GPU_MEMNOC_GFX_CLK 34
+#define GCC_GPU_SNOC_DVM_GFX_CLK 35
+#define GCC_PCIE_0_AUX_CLK 36
+#define GCC_PCIE_0_AUX_CLK_SRC 37
+#define GCC_PCIE_0_CFG_AHB_CLK 38
+#define GCC_PCIE_0_MSTR_AXI_CLK 39
+#define GCC_PCIE_0_PHY_RCHNG_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 41
+#define GCC_PCIE_0_PIPE_CLK 42
+#define GCC_PCIE_0_PIPE_CLK_SRC 43
+#define GCC_PCIE_0_SLV_AXI_CLK 44
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 45
+#define GCC_PCIE_1_AUX_CLK 46
+#define GCC_PCIE_1_AUX_CLK_SRC 47
+#define GCC_PCIE_1_CFG_AHB_CLK 48
+#define GCC_PCIE_1_MSTR_AXI_CLK 49
+#define GCC_PCIE_1_PHY_AUX_CLK 50
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 51
+#define GCC_PCIE_1_PHY_RCHNG_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_PIPE_CLK_SRC 55
+#define GCC_PCIE_1_SLV_AXI_CLK 56
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 67
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 68
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 69
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 70
+#define GCC_QUPV3_I2C_CORE_CLK 71
+#define GCC_QUPV3_I2C_S0_CLK 72
+#define GCC_QUPV3_I2C_S0_CLK_SRC 73
+#define GCC_QUPV3_I2C_S1_CLK 74
+#define GCC_QUPV3_I2C_S1_CLK_SRC 75
+#define GCC_QUPV3_I2C_S2_CLK 76
+#define GCC_QUPV3_I2C_S2_CLK_SRC 77
+#define GCC_QUPV3_I2C_S3_CLK 78
+#define GCC_QUPV3_I2C_S3_CLK_SRC 79
+#define GCC_QUPV3_I2C_S4_CLK 80
+#define GCC_QUPV3_I2C_S4_CLK_SRC 81
+#define GCC_QUPV3_I2C_S5_CLK 82
+#define GCC_QUPV3_I2C_S5_CLK_SRC 83
+#define GCC_QUPV3_I2C_S6_CLK 84
+#define GCC_QUPV3_I2C_S6_CLK_SRC 85
+#define GCC_QUPV3_I2C_S7_CLK 86
+#define GCC_QUPV3_I2C_S7_CLK_SRC 87
+#define GCC_QUPV3_I2C_S8_CLK 88
+#define GCC_QUPV3_I2C_S8_CLK_SRC 89
+#define GCC_QUPV3_I2C_S9_CLK 90
+#define GCC_QUPV3_I2C_S9_CLK_SRC 91
+#define GCC_QUPV3_I2C_S_AHB_CLK 92
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP1_CORE_CLK 94
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S0_CLK 97
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S1_CLK 99
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S2_CLK 101
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S3_CLK 103
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S4_CLK 105
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S5_CLK 107
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108
+#define GCC_QUPV3_WRAP1_S6_CLK 109
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_S7_CLK 111
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 113
+#define GCC_QUPV3_WRAP2_CORE_CLK 114
+#define GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC 115
+#define GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK 116
+#define GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK 117
+#define GCC_QUPV3_WRAP2_S0_CLK 118
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 119
+#define GCC_QUPV3_WRAP2_S1_CLK 120
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 121
+#define GCC_QUPV3_WRAP2_S2_CLK 122
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S3_CLK 124
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S4_CLK 126
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S5_CLK 128
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S6_CLK 130
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S7_CLK 132
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 133
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 134
+#define GCC_QUPV3_WRAP3_CORE_CLK 135
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK 136
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK_SRC 137
+#define GCC_QUPV3_WRAP3_S0_CLK 138
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 139
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK 142
+#define GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK 143
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 144
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 145
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 146
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 147
+#define GCC_SDCC2_AHB_CLK 148
+#define GCC_SDCC2_APPS_CLK 149
+#define GCC_SDCC2_APPS_CLK_SRC 150
+#define GCC_SDCC4_AHB_CLK 151
+#define GCC_SDCC4_APPS_CLK 152
+#define GCC_SDCC4_APPS_CLK_SRC 153
+#define GCC_UFS_PHY_AHB_CLK 154
+#define GCC_UFS_PHY_AXI_CLK 155
+#define GCC_UFS_PHY_AXI_CLK_SRC 156
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 157
+#define GCC_UFS_PHY_ICE_CORE_CLK 158
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 160
+#define GCC_UFS_PHY_PHY_AUX_CLK 161
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 162
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 166
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 167
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 169
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 171
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 174
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 176
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 177
+#define GCC_USB30_PRIM_SLEEP_CLK 178
+#define GCC_USB3_PRIM_PHY_AUX_CLK 179 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 180 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 181 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 182 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 183 +#define GCC_VIDEO_AHB_CLK 184 +#define GCC_VIDEO_AXI0_CLK 185 +#define GCC_VIDEO_AXI1_CLK 186 +#define GCC_VIDEO_XO_CLK 187 +#define GCC_GPLL0_AO 188 +#define GCC_GPLL0_OUT_EVEN_AO 189 +#define GCC_GPLL1_AO 190 +#define GCC_GPLL3_AO 191 +#define GCC_GPLL4_AO 192 +#define GCC_GPLL6_AO 193 + +/* GCC resets */ +#define GCC_CAMERA_BCR 0 +#define GCC_DISPLAY_BCR 1 +#define GCC_GPU_BCR 2 +#define GCC_PCIE_0_BCR 3 +#define GCC_PCIE_0_LINK_DOWN_BCR 4 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5 +#define GCC_PCIE_0_PHY_BCR 6 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7 +#define GCC_PCIE_1_BCR 8 +#define GCC_PCIE_1_LINK_DOWN_BCR 9 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10 +#define GCC_PCIE_1_PHY_BCR 11 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12 +#define GCC_PCIE_PHY_BCR 13 +#define GCC_PCIE_PHY_CFG_AHB_BCR 14 +#define GCC_PCIE_PHY_COM_BCR 15 +#define GCC_PDM_BCR 16 +#define GCC_QUPV3_WRAPPER_1_BCR 17 +#define GCC_QUPV3_WRAPPER_2_BCR 18 +#define GCC_QUPV3_WRAPPER_3_BCR 19 +#define GCC_QUPV3_WRAPPER_I2C_BCR 20 +#define GCC_QUSB2PHY_PRIM_BCR 21 +#define GCC_QUSB2PHY_SEC_BCR 22 +#define GCC_SDCC2_BCR 23 +#define GCC_SDCC4_BCR 24 +#define GCC_UFS_PHY_BCR 25 +#define GCC_USB30_PRIM_BCR 26 +#define GCC_USB3_DP_PHY_PRIM_BCR 27 +#define GCC_USB3_DP_PHY_SEC_BCR 28 +#define GCC_USB3_PHY_PRIM_BCR 29 +#define GCC_USB3_PHY_SEC_BCR 30 +#define GCC_USB3PHY_PHY_PRIM_BCR 31 +#define GCC_USB3PHY_PHY_SEC_BCR 32 +#define GCC_VIDEO_AXI0_CLK_ARES 33 +#define GCC_VIDEO_AXI1_CLK_ARES 34 +#define GCC_VIDEO_BCR 35 + +/* GCC power domains */ +#define PCIE_0_GDSC 0 +#define PCIE_0_PHY_GDSC 1 +#define PCIE_1_GDSC 2 +#define PCIE_1_PHY_GDSC 3 +#define UFS_PHY_GDSC 4 +#define UFS_MEM_PHY_GDSC 5 +#define USB30_PRIM_GDSC 6 +#define USB3_PHY_GDSC 7 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8650-gpucc.h b/include/dt-bindings/clock/qcom,sm8650-gpucc.h new file mode 100644 index 000000000000..d0dc457cfe75 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8650-gpucc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. 
All rights reserved + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H + +/* GPU_CC clocks */ +#define GPU_CC_AHB_CLK 0 +#define GPU_CC_CRC_AHB_CLK 1 +#define GPU_CC_CX_ACCU_SHIFT_CLK 2 +#define GPU_CC_CX_FF_CLK 3 +#define GPU_CC_CX_GMU_CLK 4 +#define GPU_CC_CXO_AON_CLK 5 +#define GPU_CC_CXO_CLK 6 +#define GPU_CC_DEMET_CLK 7 +#define GPU_CC_DPM_CLK 8 +#define GPU_CC_FF_CLK_SRC 9 +#define GPU_CC_FREQ_MEASURE_CLK 10 +#define GPU_CC_GMU_CLK_SRC 11 +#define GPU_CC_GX_ACCU_SHIFT_CLK 12 +#define GPU_CC_GX_FF_CLK 13 +#define GPU_CC_GX_GFX3D_CLK 14 +#define GPU_CC_GX_GFX3D_RDVM_CLK 15 +#define GPU_CC_GX_GMU_CLK 16 +#define GPU_CC_GX_VSENSE_CLK 17 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 18 +#define GPU_CC_HUB_AON_CLK 19 +#define GPU_CC_HUB_CLK_SRC 20 +#define GPU_CC_HUB_CX_INT_CLK 21 +#define GPU_CC_HUB_DIV_CLK_SRC 22 +#define GPU_CC_MEMNOC_GFX_CLK 23 +#define GPU_CC_PLL0 24 +#define GPU_CC_PLL1 25 +#define GPU_CC_SLEEP_CLK 26 + +/* GDSCs */ +#define GPU_GX_GDSC 0 +#define GPU_CX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8650-tcsr.h b/include/dt-bindings/clock/qcom,sm8650-tcsr.h new file mode 100644 index 000000000000..b2c72d492f1f --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8650-tcsr.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H +#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H + +/* TCSR CC clocks */ +#define TCSR_PCIE_0_CLKREF_EN 0 +#define TCSR_PCIE_1_CLKREF_EN 1 +#define TCSR_UFS_CLKREF_EN 2 +#define TCSR_UFS_PAD_CLKREF_EN 3 +#define TCSR_USB2_CLKREF_EN 4 +#define TCSR_USB3_CLKREF_EN 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h index e24ee840cfdb..c557b78dc572 100644 --- a/include/dt-bindings/clock/qcom,videocc-sm8150.h +++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h @@ -16,6 +16,10 @@ /* VIDEO_CC Resets */ #define VIDEO_CC_MVSC_CORE_CLK_BCR 0 +#define VIDEO_CC_INTERFACE_BCR 1 +#define VIDEO_CC_MVS0_BCR 2 +#define VIDEO_CC_MVS1_BCR 3 +#define VIDEO_CC_MVSC_BCR 4 /* VIDEO_CC GDSCRs */ #define VENUS_GDSC 0 diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h new file mode 100644 index 000000000000..24ba9e2a5cf6 --- /dev/null +++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H +#define _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H + +/* GCC clocks */ +#define GCC_AGGRE_NOC_USB_NORTH_AXI_CLK 0 +#define GCC_AGGRE_NOC_USB_SOUTH_AXI_CLK 1 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 2 +#define GCC_AGGRE_USB2_PRIM_AXI_CLK 3 +#define GCC_AGGRE_USB3_MP_AXI_CLK 4 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 6 +#define GCC_AGGRE_USB3_TERT_AXI_CLK 7 +#define GCC_AGGRE_USB4_0_AXI_CLK 8 +#define GCC_AGGRE_USB4_1_AXI_CLK 9 +#define GCC_AGGRE_USB4_2_AXI_CLK 10 +#define GCC_AGGRE_USB_NOC_AXI_CLK 11 +#define GCC_AV1E_AHB_CLK 12 +#define GCC_AV1E_AXI_CLK 13 +#define GCC_AV1E_XO_CLK 14 +#define GCC_BOOT_ROM_AHB_CLK 15 +#define GCC_CAMERA_AHB_CLK 16 +#define GCC_CAMERA_HF_AXI_CLK 17 +#define GCC_CAMERA_SF_AXI_CLK 18 +#define GCC_CAMERA_XO_CLK 19 +#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 20 +#define GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK 21 +#define GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK 22 +#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 23 +#define GCC_CFG_NOC_USB3_MP_AXI_CLK 24 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 25 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 26 +#define GCC_CFG_NOC_USB3_TERT_AXI_CLK 27 +#define GCC_CFG_NOC_USB_ANOC_AHB_CLK 28 +#define GCC_CFG_NOC_USB_ANOC_NORTH_AHB_CLK 29 +#define GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK 30 +#define GCC_CNOC_PCIE1_TUNNEL_CLK 31 +#define GCC_CNOC_PCIE2_TUNNEL_CLK 32 +#define GCC_CNOC_PCIE_NORTH_SF_AXI_CLK 33 +#define GCC_CNOC_PCIE_SOUTH_SF_AXI_CLK 34 +#define GCC_CNOC_PCIE_TUNNEL_CLK 35 +#define GCC_DDRSS_GPU_AXI_CLK 36 +#define GCC_DISP_AHB_CLK 37 +#define GCC_DISP_HF_AXI_CLK 38 +#define GCC_DISP_XO_CLK 39 +#define GCC_GP1_CLK 40 +#define GCC_GP1_CLK_SRC 41 +#define GCC_GP2_CLK 42 +#define GCC_GP2_CLK_SRC 43 +#define GCC_GP3_CLK 44 +#define GCC_GP3_CLK_SRC 45 +#define GCC_GPLL0 46 +#define GCC_GPLL0_OUT_EVEN 47 +#define GCC_GPLL4 48 +#define GCC_GPLL7 49 +#define GCC_GPLL8 50 +#define GCC_GPLL9 51 +#define GCC_GPU_CFG_AHB_CLK 52 +#define GCC_GPU_GPLL0_CPH_CLK_SRC 53 +#define GCC_GPU_GPLL0_DIV_CPH_CLK_SRC 54 +#define GCC_GPU_MEMNOC_GFX_CLK 55 +#define GCC_GPU_SNOC_DVM_GFX_CLK 56 +#define GCC_PCIE0_PHY_RCHNG_CLK 57 +#define GCC_PCIE1_PHY_RCHNG_CLK 58 +#define GCC_PCIE2_PHY_RCHNG_CLK 59 +#define GCC_PCIE_0_AUX_CLK 60 +#define GCC_PCIE_0_AUX_CLK_SRC 61 +#define GCC_PCIE_0_CFG_AHB_CLK 62 +#define GCC_PCIE_0_MSTR_AXI_CLK 63 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 64 +#define GCC_PCIE_0_PIPE_CLK 65 +#define GCC_PCIE_0_SLV_AXI_CLK 66 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 67 +#define GCC_PCIE_1_AUX_CLK 68 +#define GCC_PCIE_1_AUX_CLK_SRC 69 +#define GCC_PCIE_1_CFG_AHB_CLK 70 +#define GCC_PCIE_1_MSTR_AXI_CLK 71 +#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 72 +#define GCC_PCIE_1_PIPE_CLK 73 +#define GCC_PCIE_1_SLV_AXI_CLK 74 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 75 +#define GCC_PCIE_2_AUX_CLK 76 +#define GCC_PCIE_2_AUX_CLK_SRC 77 +#define GCC_PCIE_2_CFG_AHB_CLK 78 +#define GCC_PCIE_2_MSTR_AXI_CLK 79 +#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 80 +#define GCC_PCIE_2_PIPE_CLK 81 +#define GCC_PCIE_2_SLV_AXI_CLK 82 +#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 83 +#define GCC_PCIE_3_AUX_CLK 84 +#define GCC_PCIE_3_AUX_CLK_SRC 85 +#define GCC_PCIE_3_CFG_AHB_CLK 86 +#define GCC_PCIE_3_MSTR_AXI_CLK 87 +#define GCC_PCIE_3_PHY_AUX_CLK 88 +#define GCC_PCIE_3_PHY_RCHNG_CLK 89 +#define GCC_PCIE_3_PHY_RCHNG_CLK_SRC 90 +#define GCC_PCIE_3_PIPE_CLK 91 +#define GCC_PCIE_3_PIPE_DIV_CLK_SRC 92 +#define GCC_PCIE_3_PIPEDIV2_CLK 93 +#define GCC_PCIE_3_SLV_AXI_CLK 94 +#define GCC_PCIE_3_SLV_Q2A_AXI_CLK 95 +#define GCC_PCIE_4_AUX_CLK 96 +#define 
GCC_PCIE_4_AUX_CLK_SRC 97 +#define GCC_PCIE_4_CFG_AHB_CLK 98 +#define GCC_PCIE_4_MSTR_AXI_CLK 99 +#define GCC_PCIE_4_PHY_RCHNG_CLK 100 +#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 101 +#define GCC_PCIE_4_PIPE_CLK 102 +#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 103 +#define GCC_PCIE_4_PIPEDIV2_CLK 104 +#define GCC_PCIE_4_SLV_AXI_CLK 105 +#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 106 +#define GCC_PCIE_5_AUX_CLK 107 +#define GCC_PCIE_5_AUX_CLK_SRC 108 +#define GCC_PCIE_5_CFG_AHB_CLK 109 +#define GCC_PCIE_5_MSTR_AXI_CLK 110 +#define GCC_PCIE_5_PHY_RCHNG_CLK 111 +#define GCC_PCIE_5_PHY_RCHNG_CLK_SRC 112 +#define GCC_PCIE_5_PIPE_CLK 113 +#define GCC_PCIE_5_PIPE_DIV_CLK_SRC 114 +#define GCC_PCIE_5_PIPEDIV2_CLK 115 +#define GCC_PCIE_5_SLV_AXI_CLK 116 +#define GCC_PCIE_5_SLV_Q2A_AXI_CLK 117 +#define GCC_PCIE_6A_AUX_CLK 118 +#define GCC_PCIE_6A_AUX_CLK_SRC 119 +#define GCC_PCIE_6A_CFG_AHB_CLK 120 +#define GCC_PCIE_6A_MSTR_AXI_CLK 121 +#define GCC_PCIE_6A_PHY_AUX_CLK 122 +#define GCC_PCIE_6A_PHY_RCHNG_CLK 123 +#define GCC_PCIE_6A_PHY_RCHNG_CLK_SRC 124 +#define GCC_PCIE_6A_PIPE_CLK 125 +#define GCC_PCIE_6A_PIPE_DIV_CLK_SRC 126 +#define GCC_PCIE_6A_PIPEDIV2_CLK 127 +#define GCC_PCIE_6A_SLV_AXI_CLK 128 +#define GCC_PCIE_6A_SLV_Q2A_AXI_CLK 129 +#define GCC_PCIE_6B_AUX_CLK 130 +#define GCC_PCIE_6B_AUX_CLK_SRC 131 +#define GCC_PCIE_6B_CFG_AHB_CLK 132 +#define GCC_PCIE_6B_MSTR_AXI_CLK 133 +#define GCC_PCIE_6B_PHY_AUX_CLK 134 +#define GCC_PCIE_6B_PHY_RCHNG_CLK 135 +#define GCC_PCIE_6B_PHY_RCHNG_CLK_SRC 136 +#define GCC_PCIE_6B_PIPE_CLK 137 +#define GCC_PCIE_6B_PIPE_DIV_CLK_SRC 138 +#define GCC_PCIE_6B_PIPEDIV2_CLK 139 +#define GCC_PCIE_6B_SLV_AXI_CLK 140 +#define GCC_PCIE_6B_SLV_Q2A_AXI_CLK 141 +#define GCC_PCIE_RSCC_AHB_CLK 142 +#define GCC_PCIE_RSCC_XO_CLK 143 +#define GCC_PCIE_RSCC_XO_CLK_SRC 144 +#define GCC_PDM2_CLK 145 +#define GCC_PDM2_CLK_SRC 146 +#define GCC_PDM_AHB_CLK 147 +#define GCC_PDM_XO4_CLK 148 +#define GCC_QMIP_AV1E_AHB_CLK 149 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 150 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 151 +#define GCC_QMIP_DISP_AHB_CLK 152 +#define GCC_QMIP_GPU_AHB_CLK 153 +#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 154 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 155 +#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 156 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 157 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 158 +#define GCC_QUPV3_WRAP0_CORE_CLK 159 +#define GCC_QUPV3_WRAP0_QSPI_S2_CLK 160 +#define GCC_QUPV3_WRAP0_QSPI_S3_CLK 161 +#define GCC_QUPV3_WRAP0_S0_CLK 162 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 163 +#define GCC_QUPV3_WRAP0_S1_CLK 164 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 165 +#define GCC_QUPV3_WRAP0_S2_CLK 166 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 167 +#define GCC_QUPV3_WRAP0_S2_DIV_CLK_SRC 168 +#define GCC_QUPV3_WRAP0_S3_CLK 169 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 170 +#define GCC_QUPV3_WRAP0_S3_DIV_CLK_SRC 171 +#define GCC_QUPV3_WRAP0_S4_CLK 172 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 173 +#define GCC_QUPV3_WRAP0_S5_CLK 174 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 175 +#define GCC_QUPV3_WRAP0_S6_CLK 176 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 177 +#define GCC_QUPV3_WRAP0_S7_CLK 178 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 179 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 180 +#define GCC_QUPV3_WRAP1_CORE_CLK 181 +#define GCC_QUPV3_WRAP1_QSPI_S2_CLK 182 +#define GCC_QUPV3_WRAP1_QSPI_S3_CLK 183 +#define GCC_QUPV3_WRAP1_S0_CLK 184 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 185 +#define GCC_QUPV3_WRAP1_S1_CLK 186 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 187 +#define GCC_QUPV3_WRAP1_S2_CLK 188 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 189 +#define GCC_QUPV3_WRAP1_S2_DIV_CLK_SRC 190 
+#define GCC_QUPV3_WRAP1_S3_CLK 191 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 192 +#define GCC_QUPV3_WRAP1_S3_DIV_CLK_SRC 193 +#define GCC_QUPV3_WRAP1_S4_CLK 194 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 195 +#define GCC_QUPV3_WRAP1_S5_CLK 196 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 197 +#define GCC_QUPV3_WRAP1_S6_CLK 198 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 199 +#define GCC_QUPV3_WRAP1_S7_CLK 200 +#define GCC_QUPV3_WRAP1_S7_CLK_SRC 201 +#define GCC_QUPV3_WRAP2_CORE_2X_CLK 202 +#define GCC_QUPV3_WRAP2_CORE_CLK 203 +#define GCC_QUPV3_WRAP2_QSPI_S2_CLK 204 +#define GCC_QUPV3_WRAP2_QSPI_S3_CLK 205 +#define GCC_QUPV3_WRAP2_S0_CLK 206 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 207 +#define GCC_QUPV3_WRAP2_S1_CLK 208 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 209 +#define GCC_QUPV3_WRAP2_S2_CLK 210 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 211 +#define GCC_QUPV3_WRAP2_S2_DIV_CLK_SRC 212 +#define GCC_QUPV3_WRAP2_S3_CLK 213 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 214 +#define GCC_QUPV3_WRAP2_S3_DIV_CLK_SRC 215 +#define GCC_QUPV3_WRAP2_S4_CLK 216 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 217 +#define GCC_QUPV3_WRAP2_S5_CLK 218 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 219 +#define GCC_QUPV3_WRAP2_S6_CLK 220 +#define GCC_QUPV3_WRAP2_S6_CLK_SRC 221 +#define GCC_QUPV3_WRAP2_S7_CLK 222 +#define GCC_QUPV3_WRAP2_S7_CLK_SRC 223 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 224 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 225 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 226 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 227 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 228 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 229 +#define GCC_SDCC2_AHB_CLK 230 +#define GCC_SDCC2_APPS_CLK 231 +#define GCC_SDCC2_APPS_CLK_SRC 232 +#define GCC_SDCC4_AHB_CLK 233 +#define GCC_SDCC4_APPS_CLK 234 +#define GCC_SDCC4_APPS_CLK_SRC 235 +#define GCC_SYS_NOC_USB_AXI_CLK 236 +#define GCC_UFS_PHY_AHB_CLK 237 +#define GCC_UFS_PHY_AXI_CLK 238 +#define GCC_UFS_PHY_AXI_CLK_SRC 239 +#define GCC_UFS_PHY_ICE_CORE_CLK 240 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 241 +#define GCC_UFS_PHY_PHY_AUX_CLK 242 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 243 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 244 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 245 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 246 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 247 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 248 +#define GCC_USB20_MASTER_CLK 249 +#define GCC_USB20_MASTER_CLK_SRC 250 +#define GCC_USB20_MOCK_UTMI_CLK 251 +#define GCC_USB20_MOCK_UTMI_CLK_SRC 252 +#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 253 +#define GCC_USB20_SLEEP_CLK 254 +#define GCC_USB30_MP_MASTER_CLK 255 +#define GCC_USB30_MP_MASTER_CLK_SRC 256 +#define GCC_USB30_MP_MOCK_UTMI_CLK 257 +#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 258 +#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 259 +#define GCC_USB30_MP_SLEEP_CLK 260 +#define GCC_USB30_PRIM_MASTER_CLK 261 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 262 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 263 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 264 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 265 +#define GCC_USB30_PRIM_SLEEP_CLK 266 +#define GCC_USB30_SEC_MASTER_CLK 267 +#define GCC_USB30_SEC_MASTER_CLK_SRC 268 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 269 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 270 +#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 271 +#define GCC_USB30_SEC_SLEEP_CLK 272 +#define GCC_USB30_TERT_MASTER_CLK 273 +#define GCC_USB30_TERT_MASTER_CLK_SRC 274 +#define GCC_USB30_TERT_MOCK_UTMI_CLK 275 +#define GCC_USB30_TERT_MOCK_UTMI_CLK_SRC 276 +#define GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC 277 +#define GCC_USB30_TERT_SLEEP_CLK 278 +#define GCC_USB3_MP_PHY_AUX_CLK 279 +#define 
GCC_USB3_MP_PHY_AUX_CLK_SRC 280 +#define GCC_USB3_MP_PHY_COM_AUX_CLK 281 +#define GCC_USB3_MP_PHY_PIPE_0_CLK 282 +#define GCC_USB3_MP_PHY_PIPE_1_CLK 283 +#define GCC_USB3_PRIM_PHY_AUX_CLK 284 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 285 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 286 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 287 +#define GCC_USB3_SEC_PHY_AUX_CLK 288 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 289 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 290 +#define GCC_USB3_SEC_PHY_PIPE_CLK 291 +#define GCC_USB3_TERT_PHY_AUX_CLK 292 +#define GCC_USB3_TERT_PHY_AUX_CLK_SRC 293 +#define GCC_USB3_TERT_PHY_COM_AUX_CLK 294 +#define GCC_USB3_TERT_PHY_PIPE_CLK 295 +#define GCC_USB4_0_CFG_AHB_CLK 296 +#define GCC_USB4_0_DP0_CLK 297 +#define GCC_USB4_0_DP1_CLK 298 +#define GCC_USB4_0_MASTER_CLK 299 +#define GCC_USB4_0_MASTER_CLK_SRC 300 +#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK 301 +#define GCC_USB4_0_PHY_PCIE_PIPE_CLK 302 +#define GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC 303 +#define GCC_USB4_0_PHY_RX0_CLK 304 +#define GCC_USB4_0_PHY_RX1_CLK 305 +#define GCC_USB4_0_PHY_USB_PIPE_CLK 306 +#define GCC_USB4_0_SB_IF_CLK 307 +#define GCC_USB4_0_SB_IF_CLK_SRC 308 +#define GCC_USB4_0_SYS_CLK 309 +#define GCC_USB4_0_TMU_CLK 310 +#define GCC_USB4_0_TMU_CLK_SRC 311 +#define GCC_USB4_1_CFG_AHB_CLK 312 +#define GCC_USB4_1_DP0_CLK 313 +#define GCC_USB4_1_DP1_CLK 314 +#define GCC_USB4_1_MASTER_CLK 315 +#define GCC_USB4_1_MASTER_CLK_SRC 316 +#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 317 +#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 318 +#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 319 +#define GCC_USB4_1_PHY_RX0_CLK 320 +#define GCC_USB4_1_PHY_RX1_CLK 321 +#define GCC_USB4_1_PHY_USB_PIPE_CLK 322 +#define GCC_USB4_1_SB_IF_CLK 323 +#define GCC_USB4_1_SB_IF_CLK_SRC 324 +#define GCC_USB4_1_SYS_CLK 325 +#define GCC_USB4_1_TMU_CLK 326 +#define GCC_USB4_1_TMU_CLK_SRC 327 +#define GCC_USB4_2_CFG_AHB_CLK 328 +#define GCC_USB4_2_DP0_CLK 329 +#define GCC_USB4_2_DP1_CLK 330 +#define GCC_USB4_2_MASTER_CLK 331 +#define GCC_USB4_2_MASTER_CLK_SRC 332 +#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK 333 +#define GCC_USB4_2_PHY_PCIE_PIPE_CLK 334 +#define GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC 335 +#define GCC_USB4_2_PHY_RX0_CLK 336 +#define GCC_USB4_2_PHY_RX1_CLK 337 +#define GCC_USB4_2_PHY_USB_PIPE_CLK 338 +#define GCC_USB4_2_SB_IF_CLK 339 +#define GCC_USB4_2_SB_IF_CLK_SRC 340 +#define GCC_USB4_2_SYS_CLK 341 +#define GCC_USB4_2_TMU_CLK 342 +#define GCC_USB4_2_TMU_CLK_SRC 343 +#define GCC_VIDEO_AHB_CLK 344 +#define GCC_VIDEO_AXI0_CLK 345 +#define GCC_VIDEO_AXI1_CLK 346 +#define GCC_VIDEO_XO_CLK 347 +#define GCC_PCIE_3_PIPE_CLK_SRC 348 +#define GCC_PCIE_4_PIPE_CLK_SRC 349 +#define GCC_PCIE_5_PIPE_CLK_SRC 350 +#define GCC_PCIE_6A_PIPE_CLK_SRC 351 +#define GCC_PCIE_6B_PIPE_CLK_SRC 352 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 353 +#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 354 +#define GCC_USB3_TERT_PHY_PIPE_CLK_SRC 355 + +/* GCC power domains */ +#define GCC_PCIE_0_TUNNEL_GDSC 0 +#define GCC_PCIE_1_TUNNEL_GDSC 1 +#define GCC_PCIE_2_TUNNEL_GDSC 2 +#define GCC_PCIE_3_GDSC 3 +#define GCC_PCIE_3_PHY_GDSC 4 +#define GCC_PCIE_4_GDSC 5 +#define GCC_PCIE_4_PHY_GDSC 6 +#define GCC_PCIE_5_GDSC 7 +#define GCC_PCIE_5_PHY_GDSC 8 +#define GCC_PCIE_6_PHY_GDSC 9 +#define GCC_PCIE_6A_GDSC 10 +#define GCC_PCIE_6B_GDSC 11 +#define GCC_UFS_MEM_PHY_GDSC 12 +#define GCC_UFS_PHY_GDSC 13 +#define GCC_USB20_PRIM_GDSC 14 +#define GCC_USB30_MP_GDSC 15 +#define GCC_USB30_PRIM_GDSC 16 +#define GCC_USB30_SEC_GDSC 17 +#define GCC_USB30_TERT_GDSC 18 +#define GCC_USB3_MP_SS0_PHY_GDSC 19 +#define GCC_USB3_MP_SS1_PHY_GDSC 20 +#define 
GCC_USB4_0_GDSC 21 +#define GCC_USB4_1_GDSC 22 +#define GCC_USB4_2_GDSC 23 +#define GCC_USB_0_PHY_GDSC 24 +#define GCC_USB_1_PHY_GDSC 25 +#define GCC_USB_2_PHY_GDSC 26 + +/* GCC resets */ +#define GCC_AV1E_BCR 0 +#define GCC_CAMERA_BCR 1 +#define GCC_DISPLAY_BCR 2 +#define GCC_GPU_BCR 3 +#define GCC_PCIE_0_LINK_DOWN_BCR 4 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5 +#define GCC_PCIE_0_PHY_BCR 6 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7 +#define GCC_PCIE_0_TUNNEL_BCR 8 +#define GCC_PCIE_1_LINK_DOWN_BCR 9 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10 +#define GCC_PCIE_1_PHY_BCR 11 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12 +#define GCC_PCIE_1_TUNNEL_BCR 13 +#define GCC_PCIE_2_LINK_DOWN_BCR 14 +#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 15 +#define GCC_PCIE_2_PHY_BCR 16 +#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 17 +#define GCC_PCIE_2_TUNNEL_BCR 18 +#define GCC_PCIE_3_BCR 19 +#define GCC_PCIE_3_LINK_DOWN_BCR 20 +#define GCC_PCIE_3_NOCSR_COM_PHY_BCR 21 +#define GCC_PCIE_3_PHY_BCR 22 +#define GCC_PCIE_3_PHY_NOCSR_COM_PHY_BCR 23 +#define GCC_PCIE_4_BCR 24 +#define GCC_PCIE_4_LINK_DOWN_BCR 25 +#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 26 +#define GCC_PCIE_4_PHY_BCR 27 +#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 28 +#define GCC_PCIE_5_BCR 29 +#define GCC_PCIE_5_LINK_DOWN_BCR 30 +#define GCC_PCIE_5_NOCSR_COM_PHY_BCR 31 +#define GCC_PCIE_5_PHY_BCR 32 +#define GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR 33 +#define GCC_PCIE_6A_BCR 34 +#define GCC_PCIE_6A_LINK_DOWN_BCR 35 +#define GCC_PCIE_6A_NOCSR_COM_PHY_BCR 36 +#define GCC_PCIE_6A_PHY_BCR 37 +#define GCC_PCIE_6A_PHY_NOCSR_COM_PHY_BCR 38 +#define GCC_PCIE_6B_BCR 39 +#define GCC_PCIE_6B_LINK_DOWN_BCR 40 +#define GCC_PCIE_6B_NOCSR_COM_PHY_BCR 41 +#define GCC_PCIE_6B_PHY_BCR 42 +#define GCC_PCIE_6B_PHY_NOCSR_COM_PHY_BCR 43 +#define GCC_PCIE_PHY_BCR 44 +#define GCC_PCIE_PHY_CFG_AHB_BCR 45 +#define GCC_PCIE_PHY_COM_BCR 46 +#define GCC_PCIE_RSCC_BCR 47 +#define GCC_PDM_BCR 48 +#define GCC_QUPV3_WRAPPER_0_BCR 49 +#define GCC_QUPV3_WRAPPER_1_BCR 50 +#define GCC_QUPV3_WRAPPER_2_BCR 51 +#define GCC_QUSB2PHY_HS0_MP_BCR 52 +#define GCC_QUSB2PHY_HS1_MP_BCR 53 +#define GCC_QUSB2PHY_PRIM_BCR 54 +#define GCC_QUSB2PHY_SEC_BCR 55 +#define GCC_QUSB2PHY_TERT_BCR 56 +#define GCC_QUSB2PHY_USB20_HS_BCR 57 +#define GCC_SDCC2_BCR 58 +#define GCC_SDCC4_BCR 59 +#define GCC_UFS_PHY_BCR 60 +#define GCC_USB20_PRIM_BCR 61 +#define GCC_USB30_MP_BCR 62 +#define GCC_USB30_PRIM_BCR 63 +#define GCC_USB30_SEC_BCR 64 +#define GCC_USB30_TERT_BCR 65 +#define GCC_USB3_MP_SS0_PHY_BCR 66 +#define GCC_USB3_MP_SS1_PHY_BCR 67 +#define GCC_USB3_PHY_PRIM_BCR 68 +#define GCC_USB3_PHY_SEC_BCR 69 +#define GCC_USB3_PHY_TERT_BCR 70 +#define GCC_USB3_UNIPHY_MP0_BCR 71 +#define GCC_USB3_UNIPHY_MP1_BCR 72 +#define GCC_USB3PHY_PHY_PRIM_BCR 73 +#define GCC_USB3PHY_PHY_SEC_BCR 74 +#define GCC_USB3PHY_PHY_TERT_BCR 75 +#define GCC_USB3UNIPHY_PHY_MP0_BCR 76 +#define GCC_USB3UNIPHY_PHY_MP1_BCR 77 +#define GCC_USB4_0_BCR 78 +#define GCC_USB4_0_DP0_PHY_PRIM_BCR 79 +#define GCC_USB4_1_DP0_PHY_SEC_BCR 80 +#define GCC_USB4_2_DP0_PHY_TERT_BCR 81 +#define GCC_USB4_1_BCR 82 +#define GCC_USB4_2_BCR 83 +#define GCC_USB_0_PHY_BCR 84 +#define GCC_USB_1_PHY_BCR 85 +#define GCC_USB_2_PHY_BCR 86 +#define GCC_VIDEO_BCR 87 +#endif diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h new file mode 100644 index 000000000000..410725b778a8 --- /dev/null +++ b/include/dt-bindings/clock/r9a08g045-cpg.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2023 Renesas 
Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ +#define __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* R9A08G045 CPG Core Clocks */ +#define R9A08G045_CLK_I 0 +#define R9A08G045_CLK_I2 1 +#define R9A08G045_CLK_I3 2 +#define R9A08G045_CLK_S0 3 +#define R9A08G045_CLK_SPI0 4 +#define R9A08G045_CLK_SPI1 5 +#define R9A08G045_CLK_SD0 6 +#define R9A08G045_CLK_SD1 7 +#define R9A08G045_CLK_SD2 8 +#define R9A08G045_CLK_M0 9 +#define R9A08G045_CLK_HP 10 +#define R9A08G045_CLK_TSU 11 +#define R9A08G045_CLK_ZT 12 +#define R9A08G045_CLK_P0 13 +#define R9A08G045_CLK_P1 14 +#define R9A08G045_CLK_P2 15 +#define R9A08G045_CLK_P3 16 +#define R9A08G045_CLK_P4 17 +#define R9A08G045_CLK_P5 18 +#define R9A08G045_CLK_AT 19 +#define R9A08G045_CLK_OC0 20 +#define R9A08G045_CLK_OC1 21 +#define R9A08G045_OSCCLK 22 +#define R9A08G045_OSCCLK2 23 +#define R9A08G045_SWD 24 + +/* R9A08G045 Module Clocks */ +#define R9A08G045_OCTA_ACLK 0 +#define R9A08G045_OCTA_MCLK 1 +#define R9A08G045_CA55_SCLK 2 +#define R9A08G045_CA55_PCLK 3 +#define R9A08G045_CA55_ATCLK 4 +#define R9A08G045_CA55_GICCLK 5 +#define R9A08G045_CA55_PERICLK 6 +#define R9A08G045_CA55_ACLK 7 +#define R9A08G045_CA55_TSCLK 8 +#define R9A08G045_SRAM_ACPU_ACLK0 9 +#define R9A08G045_SRAM_ACPU_ACLK1 10 +#define R9A08G045_SRAM_ACPU_ACLK2 11 +#define R9A08G045_GIC600_GICCLK 12 +#define R9A08G045_IA55_CLK 13 +#define R9A08G045_IA55_PCLK 14 +#define R9A08G045_MHU_PCLK 15 +#define R9A08G045_SYC_CNT_CLK 16 +#define R9A08G045_DMAC_ACLK 17 +#define R9A08G045_DMAC_PCLK 18 +#define R9A08G045_OSTM0_PCLK 19 +#define R9A08G045_OSTM1_PCLK 20 +#define R9A08G045_OSTM2_PCLK 21 +#define R9A08G045_OSTM3_PCLK 22 +#define R9A08G045_OSTM4_PCLK 23 +#define R9A08G045_OSTM5_PCLK 24 +#define R9A08G045_OSTM6_PCLK 25 +#define R9A08G045_OSTM7_PCLK 26 +#define R9A08G045_MTU_X_MCK_MTU3 27 +#define R9A08G045_POE3_CLKM_POE 28 +#define R9A08G045_GPT_PCLK 29 +#define R9A08G045_POEG_A_CLKP 30 +#define R9A08G045_POEG_B_CLKP 31 +#define R9A08G045_POEG_C_CLKP 32 +#define R9A08G045_POEG_D_CLKP 33 +#define R9A08G045_WDT0_PCLK 34 +#define R9A08G045_WDT0_CLK 35 +#define R9A08G045_WDT1_PCLK 36 +#define R9A08G045_WDT1_CLK 37 +#define R9A08G045_WDT2_PCLK 38 +#define R9A08G045_WDT2_CLK 39 +#define R9A08G045_SPI_HCLK 40 +#define R9A08G045_SPI_ACLK 41 +#define R9A08G045_SPI_CLK 42 +#define R9A08G045_SPI_CLKX2 43 +#define R9A08G045_SDHI0_IMCLK 44 +#define R9A08G045_SDHI0_IMCLK2 45 +#define R9A08G045_SDHI0_CLK_HS 46 +#define R9A08G045_SDHI0_ACLK 47 +#define R9A08G045_SDHI1_IMCLK 48 +#define R9A08G045_SDHI1_IMCLK2 49 +#define R9A08G045_SDHI1_CLK_HS 50 +#define R9A08G045_SDHI1_ACLK 51 +#define R9A08G045_SDHI2_IMCLK 52 +#define R9A08G045_SDHI2_IMCLK2 53 +#define R9A08G045_SDHI2_CLK_HS 54 +#define R9A08G045_SDHI2_ACLK 55 +#define R9A08G045_SSI0_PCLK2 56 +#define R9A08G045_SSI0_PCLK_SFR 57 +#define R9A08G045_SSI1_PCLK2 58 +#define R9A08G045_SSI1_PCLK_SFR 59 +#define R9A08G045_SSI2_PCLK2 60 +#define R9A08G045_SSI2_PCLK_SFR 61 +#define R9A08G045_SSI3_PCLK2 62 +#define R9A08G045_SSI3_PCLK_SFR 63 +#define R9A08G045_SRC_CLKP 64 +#define R9A08G045_USB_U2H0_HCLK 65 +#define R9A08G045_USB_U2H1_HCLK 66 +#define R9A08G045_USB_U2P_EXR_CPUCLK 67 +#define R9A08G045_USB_PCLK 68 +#define R9A08G045_ETH0_CLK_AXI 69 +#define R9A08G045_ETH0_CLK_CHI 70 +#define R9A08G045_ETH0_REFCLK 71 +#define R9A08G045_ETH1_CLK_AXI 72 +#define R9A08G045_ETH1_CLK_CHI 73 +#define R9A08G045_ETH1_REFCLK 74 +#define R9A08G045_I2C0_PCLK 75 +#define R9A08G045_I2C1_PCLK 76 +#define 
R9A08G045_I2C2_PCLK 77 +#define R9A08G045_I2C3_PCLK 78 +#define R9A08G045_SCIF0_CLK_PCK 79 +#define R9A08G045_SCIF1_CLK_PCK 80 +#define R9A08G045_SCIF2_CLK_PCK 81 +#define R9A08G045_SCIF3_CLK_PCK 82 +#define R9A08G045_SCIF4_CLK_PCK 83 +#define R9A08G045_SCIF5_CLK_PCK 84 +#define R9A08G045_SCI0_CLKP 85 +#define R9A08G045_SCI1_CLKP 86 +#define R9A08G045_IRDA_CLKP 87 +#define R9A08G045_RSPI0_CLKB 88 +#define R9A08G045_RSPI1_CLKB 89 +#define R9A08G045_RSPI2_CLKB 90 +#define R9A08G045_RSPI3_CLKB 91 +#define R9A08G045_RSPI4_CLKB 92 +#define R9A08G045_CANFD_PCLK 93 +#define R9A08G045_CANFD_CLK_RAM 94 +#define R9A08G045_GPIO_HCLK 95 +#define R9A08G045_ADC_ADCLK 96 +#define R9A08G045_ADC_PCLK 97 +#define R9A08G045_TSU_PCLK 98 +#define R9A08G045_PDM_PCLK 99 +#define R9A08G045_PDM_CCLK 100 +#define R9A08G045_PCI_ACLK 101 +#define R9A08G045_PCI_CLKL1PM 102 +#define R9A08G045_SPDIF_PCLK 103 +#define R9A08G045_I3C_PCLK 104 +#define R9A08G045_I3C_TCLK 105 +#define R9A08G045_VBAT_BCLK 106 + +/* R9A08G045 Resets */ +#define R9A08G045_CA55_RST_1_0 0 +#define R9A08G045_CA55_RST_3_0 1 +#define R9A08G045_CA55_RST_4 2 +#define R9A08G045_CA55_RST_5 3 +#define R9A08G045_CA55_RST_6 4 +#define R9A08G045_CA55_RST_7 5 +#define R9A08G045_CA55_RST_8 6 +#define R9A08G045_CA55_RST_9 7 +#define R9A08G045_CA55_RST_10 8 +#define R9A08G045_CA55_RST_11 9 +#define R9A08G045_CA55_RST_12 10 +#define R9A08G045_SRAM_ACPU_ARESETN0 11 +#define R9A08G045_SRAM_ACPU_ARESETN1 12 +#define R9A08G045_SRAM_ACPU_ARESETN2 13 +#define R9A08G045_GIC600_GICRESET_N 14 +#define R9A08G045_GIC600_DBG_GICRESET_N 15 +#define R9A08G045_IA55_RESETN 16 +#define R9A08G045_MHU_RESETN 17 +#define R9A08G045_DMAC_ARESETN 18 +#define R9A08G045_DMAC_RST_ASYNC 19 +#define R9A08G045_SYC_RESETN 20 +#define R9A08G045_OSTM0_PRESETZ 21 +#define R9A08G045_OSTM1_PRESETZ 22 +#define R9A08G045_OSTM2_PRESETZ 23 +#define R9A08G045_OSTM3_PRESETZ 24 +#define R9A08G045_OSTM4_PRESETZ 25 +#define R9A08G045_OSTM5_PRESETZ 26 +#define R9A08G045_OSTM6_PRESETZ 27 +#define R9A08G045_OSTM7_PRESETZ 28 +#define R9A08G045_MTU_X_PRESET_MTU3 29 +#define R9A08G045_POE3_RST_M_REG 30 +#define R9A08G045_GPT_RST_C 31 +#define R9A08G045_POEG_A_RST 32 +#define R9A08G045_POEG_B_RST 33 +#define R9A08G045_POEG_C_RST 34 +#define R9A08G045_POEG_D_RST 35 +#define R9A08G045_WDT0_PRESETN 36 +#define R9A08G045_WDT1_PRESETN 37 +#define R9A08G045_WDT2_PRESETN 38 +#define R9A08G045_SPI_HRESETN 39 +#define R9A08G045_SPI_ARESETN 40 +#define R9A08G045_SDHI0_IXRST 41 +#define R9A08G045_SDHI1_IXRST 42 +#define R9A08G045_SDHI2_IXRST 43 +#define R9A08G045_SSI0_RST_M2_REG 44 +#define R9A08G045_SSI1_RST_M2_REG 45 +#define R9A08G045_SSI2_RST_M2_REG 46 +#define R9A08G045_SSI3_RST_M2_REG 47 +#define R9A08G045_SRC_RST 48 +#define R9A08G045_USB_U2H0_HRESETN 49 +#define R9A08G045_USB_U2H1_HRESETN 50 +#define R9A08G045_USB_U2P_EXL_SYSRST 51 +#define R9A08G045_USB_PRESETN 52 +#define R9A08G045_ETH0_RST_HW_N 53 +#define R9A08G045_ETH1_RST_HW_N 54 +#define R9A08G045_I2C0_MRST 55 +#define R9A08G045_I2C1_MRST 56 +#define R9A08G045_I2C2_MRST 57 +#define R9A08G045_I2C3_MRST 58 +#define R9A08G045_SCIF0_RST_SYSTEM_N 59 +#define R9A08G045_SCIF1_RST_SYSTEM_N 60 +#define R9A08G045_SCIF2_RST_SYSTEM_N 61 +#define R9A08G045_SCIF3_RST_SYSTEM_N 62 +#define R9A08G045_SCIF4_RST_SYSTEM_N 63 +#define R9A08G045_SCIF5_RST_SYSTEM_N 64 +#define R9A08G045_SCI0_RST 65 +#define R9A08G045_SCI1_RST 66 +#define R9A08G045_IRDA_RST 67 +#define R9A08G045_RSPI0_RST 68 +#define R9A08G045_RSPI1_RST 69 +#define R9A08G045_RSPI2_RST 70 +#define 
R9A08G045_RSPI3_RST 71 +#define R9A08G045_RSPI4_RST 72 +#define R9A08G045_CANFD_RSTP_N 73 +#define R9A08G045_CANFD_RSTC_N 74 +#define R9A08G045_GPIO_RSTN 75 +#define R9A08G045_GPIO_PORT_RESETN 76 +#define R9A08G045_GPIO_SPARE_RESETN 77 +#define R9A08G045_ADC_PRESETN 78 +#define R9A08G045_ADC_ADRST_N 79 +#define R9A08G045_TSU_PRESETN 80 +#define R9A08G045_OCTA_ARESETN 81 +#define R9A08G045_PDM0_PRESETNT 82 +#define R9A08G045_PCI_ARESETN 83 +#define R9A08G045_PCI_RST_B 84 +#define R9A08G045_PCI_RST_GP_B 85 +#define R9A08G045_PCI_RST_PS_B 86 +#define R9A08G045_PCI_RST_RSM_B 87 +#define R9A08G045_PCI_RST_CFG_B 88 +#define R9A08G045_PCI_RST_LOAD_B 89 +#define R9A08G045_SPDIF_RST 90 +#define R9A08G045_I3C_TRESETN 91 +#define R9A08G045_I3C_PRESETN 92 +#define R9A08G045_VBAT_BRESETN 93 + +#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */ diff --git a/include/dt-bindings/clock/sophgo,cv1800.h b/include/dt-bindings/clock/sophgo,cv1800.h new file mode 100644 index 000000000000..cfbeca25a650 --- /dev/null +++ b/include/dt-bindings/clock/sophgo,cv1800.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (C) 2023 Sophgo Ltd. + */ + +#ifndef __DT_BINDINGS_SOPHGO_CV1800_CLK_H__ +#define __DT_BINDINGS_SOPHGO_CV1800_CLK_H__ + +#define CLK_MPLL 0 +#define CLK_TPLL 1 +#define CLK_FPLL 2 +#define CLK_MIPIMPLL 3 +#define CLK_A0PLL 4 +#define CLK_DISPPLL 5 +#define CLK_CAM0PLL 6 +#define CLK_CAM1PLL 7 + +#define CLK_MIPIMPLL_D3 8 +#define CLK_CAM0PLL_D2 9 +#define CLK_CAM0PLL_D3 10 + +#define CLK_TPU 11 +#define CLK_TPU_FAB 12 +#define CLK_AHB_ROM 13 +#define CLK_DDR_AXI_REG 14 +#define CLK_RTC_25M 15 +#define CLK_SRC_RTC_SYS_0 16 +#define CLK_TEMPSEN 17 +#define CLK_SARADC 18 +#define CLK_EFUSE 19 +#define CLK_APB_EFUSE 20 +#define CLK_DEBUG 21 +#define CLK_AP_DEBUG 22 +#define CLK_XTAL_MISC 23 +#define CLK_AXI4_EMMC 24 +#define CLK_EMMC 25 +#define CLK_EMMC_100K 26 +#define CLK_AXI4_SD0 27 +#define CLK_SD0 28 +#define CLK_SD0_100K 29 +#define CLK_AXI4_SD1 30 +#define CLK_SD1 31 +#define CLK_SD1_100K 32 +#define CLK_SPI_NAND 33 +#define CLK_ETH0_500M 34 +#define CLK_AXI4_ETH0 35 +#define CLK_ETH1_500M 36 +#define CLK_AXI4_ETH1 37 +#define CLK_APB_GPIO 38 +#define CLK_APB_GPIO_INTR 39 +#define CLK_GPIO_DB 40 +#define CLK_AHB_SF 41 +#define CLK_AHB_SF1 42 +#define CLK_A24M 43 +#define CLK_AUDSRC 44 +#define CLK_APB_AUDSRC 45 +#define CLK_SDMA_AXI 46 +#define CLK_SDMA_AUD0 47 +#define CLK_SDMA_AUD1 48 +#define CLK_SDMA_AUD2 49 +#define CLK_SDMA_AUD3 50 +#define CLK_I2C 51 +#define CLK_APB_I2C 52 +#define CLK_APB_I2C0 53 +#define CLK_APB_I2C1 54 +#define CLK_APB_I2C2 55 +#define CLK_APB_I2C3 56 +#define CLK_APB_I2C4 57 +#define CLK_APB_WDT 58 +#define CLK_PWM_SRC 59 +#define CLK_PWM 60 +#define CLK_SPI 61 +#define CLK_APB_SPI0 62 +#define CLK_APB_SPI1 63 +#define CLK_APB_SPI2 64 +#define CLK_APB_SPI3 65 +#define CLK_1M 66 +#define CLK_CAM0_200 67 +#define CLK_PM 68 +#define CLK_TIMER0 69 +#define CLK_TIMER1 70 +#define CLK_TIMER2 71 +#define CLK_TIMER3 72 +#define CLK_TIMER4 73 +#define CLK_TIMER5 74 +#define CLK_TIMER6 75 +#define CLK_TIMER7 76 +#define CLK_UART0 77 +#define CLK_APB_UART0 78 +#define CLK_UART1 79 +#define CLK_APB_UART1 80 +#define CLK_UART2 81 +#define CLK_APB_UART2 82 +#define CLK_UART3 83 +#define CLK_APB_UART3 84 +#define CLK_UART4 85 +#define CLK_APB_UART4 86 +#define CLK_APB_I2S0 87 +#define CLK_APB_I2S1 88 +#define CLK_APB_I2S2 89 +#define CLK_APB_I2S3 90 +#define CLK_AXI4_USB 91 +#define CLK_APB_USB 92 +#define CLK_USB_125M 93 +#define 
CLK_USB_33K 94 +#define CLK_USB_12M 95 +#define CLK_AXI4 96 +#define CLK_AXI6 97 +#define CLK_DSI_ESC 98 +#define CLK_AXI_VIP 99 +#define CLK_SRC_VIP_SYS_0 100 +#define CLK_SRC_VIP_SYS_1 101 +#define CLK_SRC_VIP_SYS_2 102 +#define CLK_SRC_VIP_SYS_3 103 +#define CLK_SRC_VIP_SYS_4 104 +#define CLK_CSI_BE_VIP 105 +#define CLK_CSI_MAC0_VIP 106 +#define CLK_CSI_MAC1_VIP 107 +#define CLK_CSI_MAC2_VIP 108 +#define CLK_CSI0_RX_VIP 109 +#define CLK_CSI1_RX_VIP 110 +#define CLK_ISP_TOP_VIP 111 +#define CLK_IMG_D_VIP 112 +#define CLK_IMG_V_VIP 113 +#define CLK_SC_TOP_VIP 114 +#define CLK_SC_D_VIP 115 +#define CLK_SC_V1_VIP 116 +#define CLK_SC_V2_VIP 117 +#define CLK_SC_V3_VIP 118 +#define CLK_DWA_VIP 119 +#define CLK_BT_VIP 120 +#define CLK_DISP_VIP 121 +#define CLK_DSI_MAC_VIP 122 +#define CLK_LVDS0_VIP 123 +#define CLK_LVDS1_VIP 124 +#define CLK_PAD_VI_VIP 125 +#define CLK_PAD_VI1_VIP 126 +#define CLK_PAD_VI2_VIP 127 +#define CLK_CFG_REG_VIP 128 +#define CLK_VIP_IP0 129 +#define CLK_VIP_IP1 130 +#define CLK_VIP_IP2 131 +#define CLK_VIP_IP3 132 +#define CLK_IVE_VIP 133 +#define CLK_RAW_VIP 134 +#define CLK_OSDC_VIP 135 +#define CLK_CAM0_VIP 136 +#define CLK_AXI_VIDEO_CODEC 137 +#define CLK_VC_SRC0 138 +#define CLK_VC_SRC1 139 +#define CLK_VC_SRC2 140 +#define CLK_H264C 141 +#define CLK_APB_H264C 142 +#define CLK_H265C 143 +#define CLK_APB_H265C 144 +#define CLK_JPEG 145 +#define CLK_APB_JPEG 146 +#define CLK_CAM0 147 +#define CLK_CAM1 148 +#define CLK_WGN 149 +#define CLK_WGN0 150 +#define CLK_WGN1 151 +#define CLK_WGN2 152 +#define CLK_KEYSCAN 153 +#define CLK_CFG_REG_VC 154 +#define CLK_C906_0 155 +#define CLK_C906_1 156 +#define CLK_A53 157 +#define CLK_CPU_AXI0 158 +#define CLK_CPU_GIC 159 +#define CLK_XTAL_AP 160 + +// Only for CV181x +#define CLK_DISP_SRC_VIP 161 + +#endif /* __DT_BINDINGS_SOPHGO_CV1800_CLK_H__ */ diff --git a/include/dt-bindings/clock/st,stm32mp25-rcc.h b/include/dt-bindings/clock/st,stm32mp25-rcc.h new file mode 100644 index 000000000000..b6cf05ad4be6 --- /dev/null +++ b/include/dt-bindings/clock/st,stm32mp25-rcc.h @@ -0,0 +1,492 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (C) STMicroelectronics 2023 - All Rights Reserved + * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> + */ + +#ifndef _DT_BINDINGS_STM32MP25_CLKS_H_ +#define _DT_BINDINGS_STM32MP25_CLKS_H_ + +/* INTERNAL/EXTERNAL OSCILLATORS */ +#define HSI_CK 0 +#define HSE_CK 1 +#define MSI_CK 2 +#define LSI_CK 3 +#define LSE_CK 4 +#define I2S_CK 5 +#define RTC_CK 6 +#define SPDIF_CK_SYMB 7 + +/* PLL CLOCKS */ +#define PLL1_CK 8 +#define PLL2_CK 9 +#define PLL3_CK 10 +#define PLL4_CK 11 +#define PLL5_CK 12 +#define PLL6_CK 13 +#define PLL7_CK 14 +#define PLL8_CK 15 + +#define CK_CPU1 16 + +/* APB DIV CLOCKS */ +#define CK_ICN_APB1 17 +#define CK_ICN_APB2 18 +#define CK_ICN_APB3 19 +#define CK_ICN_APB4 20 +#define CK_ICN_APBDBG 21 + +/* GLOBAL TIMER */ +#define TIMG1_CK 22 +#define TIMG2_CK 23 + +/* FLEXGEN CLOCKS */ +#define CK_ICN_HS_MCU 24 +#define CK_ICN_SDMMC 25 +#define CK_ICN_DDR 26 +#define CK_ICN_DISPLAY 27 +#define CK_ICN_HSL 28 +#define CK_ICN_NIC 29 +#define CK_ICN_VID 30 +#define CK_FLEXGEN_07 31 +#define CK_FLEXGEN_08 32 +#define CK_FLEXGEN_09 33 +#define CK_FLEXGEN_10 34 +#define CK_FLEXGEN_11 35 +#define CK_FLEXGEN_12 36 +#define CK_FLEXGEN_13 37 +#define CK_FLEXGEN_14 38 +#define CK_FLEXGEN_15 39 +#define CK_FLEXGEN_16 40 +#define CK_FLEXGEN_17 41 +#define CK_FLEXGEN_18 42 +#define CK_FLEXGEN_19 43 +#define CK_FLEXGEN_20 44 +#define CK_FLEXGEN_21 45 +#define 
CK_FLEXGEN_22 46 +#define CK_FLEXGEN_23 47 +#define CK_FLEXGEN_24 48 +#define CK_FLEXGEN_25 49 +#define CK_FLEXGEN_26 50 +#define CK_FLEXGEN_27 51 +#define CK_FLEXGEN_28 52 +#define CK_FLEXGEN_29 53 +#define CK_FLEXGEN_30 54 +#define CK_FLEXGEN_31 55 +#define CK_FLEXGEN_32 56 +#define CK_FLEXGEN_33 57 +#define CK_FLEXGEN_34 58 +#define CK_FLEXGEN_35 59 +#define CK_FLEXGEN_36 60 +#define CK_FLEXGEN_37 61 +#define CK_FLEXGEN_38 62 +#define CK_FLEXGEN_39 63 +#define CK_FLEXGEN_40 64 +#define CK_FLEXGEN_41 65 +#define CK_FLEXGEN_42 66 +#define CK_FLEXGEN_43 67 +#define CK_FLEXGEN_44 68 +#define CK_FLEXGEN_45 69 +#define CK_FLEXGEN_46 70 +#define CK_FLEXGEN_47 71 +#define CK_FLEXGEN_48 72 +#define CK_FLEXGEN_49 73 +#define CK_FLEXGEN_50 74 +#define CK_FLEXGEN_51 75 +#define CK_FLEXGEN_52 76 +#define CK_FLEXGEN_53 77 +#define CK_FLEXGEN_54 78 +#define CK_FLEXGEN_55 79 +#define CK_FLEXGEN_56 80 +#define CK_FLEXGEN_57 81 +#define CK_FLEXGEN_58 82 +#define CK_FLEXGEN_59 83 +#define CK_FLEXGEN_60 84 +#define CK_FLEXGEN_61 85 +#define CK_FLEXGEN_62 86 +#define CK_FLEXGEN_63 87 + +/* LOW SPEED MCU CLOCK */ +#define CK_ICN_LS_MCU 88 + +#define CK_BUS_STM500 89 +#define CK_BUS_FMC 90 +#define CK_BUS_GPU 91 +#define CK_BUS_ETH1 92 +#define CK_BUS_ETH2 93 +#define CK_BUS_PCIE 94 +#define CK_BUS_DDRPHYC 95 +#define CK_BUS_SYSCPU1 96 +#define CK_BUS_ETHSW 97 +#define CK_BUS_HPDMA1 98 +#define CK_BUS_HPDMA2 99 +#define CK_BUS_HPDMA3 100 +#define CK_BUS_ADC12 101 +#define CK_BUS_ADC3 102 +#define CK_BUS_IPCC1 103 +#define CK_BUS_CCI 104 +#define CK_BUS_CRC 105 +#define CK_BUS_MDF1 106 +#define CK_BUS_OSPIIOM 107 +#define CK_BUS_BKPSRAM 108 +#define CK_BUS_HASH 109 +#define CK_BUS_RNG 110 +#define CK_BUS_CRYP1 111 +#define CK_BUS_CRYP2 112 +#define CK_BUS_SAES 113 +#define CK_BUS_PKA 114 +#define CK_BUS_GPIOA 115 +#define CK_BUS_GPIOB 116 +#define CK_BUS_GPIOC 117 +#define CK_BUS_GPIOD 118 +#define CK_BUS_GPIOE 119 +#define CK_BUS_GPIOF 120 +#define CK_BUS_GPIOG 121 +#define CK_BUS_GPIOH 122 +#define CK_BUS_GPIOI 123 +#define CK_BUS_GPIOJ 124 +#define CK_BUS_GPIOK 125 +#define CK_BUS_LPSRAM1 126 +#define CK_BUS_LPSRAM2 127 +#define CK_BUS_LPSRAM3 128 +#define CK_BUS_GPIOZ 129 +#define CK_BUS_LPDMA 130 +#define CK_BUS_HSEM 131 +#define CK_BUS_IPCC2 132 +#define CK_BUS_RTC 133 +#define CK_BUS_SPI8 134 +#define CK_BUS_LPUART1 135 +#define CK_BUS_I2C8 136 +#define CK_BUS_LPTIM3 137 +#define CK_BUS_LPTIM4 138 +#define CK_BUS_LPTIM5 139 +#define CK_BUS_IWDG5 140 +#define CK_BUS_WWDG2 141 +#define CK_BUS_I3C4 142 +#define CK_BUS_TIM2 143 +#define CK_BUS_TIM3 144 +#define CK_BUS_TIM4 145 +#define CK_BUS_TIM5 146 +#define CK_BUS_TIM6 147 +#define CK_BUS_TIM7 148 +#define CK_BUS_TIM10 149 +#define CK_BUS_TIM11 150 +#define CK_BUS_TIM12 151 +#define CK_BUS_TIM13 152 +#define CK_BUS_TIM14 153 +#define CK_BUS_LPTIM1 154 +#define CK_BUS_LPTIM2 155 +#define CK_BUS_SPI2 156 +#define CK_BUS_SPI3 157 +#define CK_BUS_SPDIFRX 158 +#define CK_BUS_USART2 159 +#define CK_BUS_USART3 160 +#define CK_BUS_UART4 161 +#define CK_BUS_UART5 162 +#define CK_BUS_I2C1 163 +#define CK_BUS_I2C2 164 +#define CK_BUS_I2C3 165 +#define CK_BUS_I2C4 166 +#define CK_BUS_I2C5 167 +#define CK_BUS_I2C6 168 +#define CK_BUS_I2C7 169 +#define CK_BUS_I3C1 170 +#define CK_BUS_I3C2 171 +#define CK_BUS_I3C3 172 +#define CK_BUS_TIM1 173 +#define CK_BUS_TIM8 174 +#define CK_BUS_TIM15 175 +#define CK_BUS_TIM16 176 +#define CK_BUS_TIM17 177 +#define CK_BUS_TIM20 178 +#define CK_BUS_SAI1 179 +#define CK_BUS_SAI2 180 +#define CK_BUS_SAI3 181 +#define CK_BUS_SAI4 182 
+#define CK_BUS_USART1 183 +#define CK_BUS_USART6 184 +#define CK_BUS_UART7 185 +#define CK_BUS_UART8 186 +#define CK_BUS_UART9 187 +#define CK_BUS_FDCAN 188 +#define CK_BUS_SPI1 189 +#define CK_BUS_SPI4 190 +#define CK_BUS_SPI5 191 +#define CK_BUS_SPI6 192 +#define CK_BUS_SPI7 193 +#define CK_BUS_BSEC 194 +#define CK_BUS_IWDG1 195 +#define CK_BUS_IWDG2 196 +#define CK_BUS_IWDG3 197 +#define CK_BUS_IWDG4 198 +#define CK_BUS_WWDG1 199 +#define CK_BUS_VREF 200 +#define CK_BUS_DTS 201 +#define CK_BUS_SERC 202 +#define CK_BUS_HDP 203 +#define CK_BUS_IS2M 204 +#define CK_BUS_DSI 205 +#define CK_BUS_LTDC 206 +#define CK_BUS_CSI 207 +#define CK_BUS_DCMIPP 208 +#define CK_BUS_DDRC 209 +#define CK_BUS_DDRCFG 210 +#define CK_BUS_GICV2M 211 +#define CK_BUS_USBTC 212 +#define CK_BUS_USB3PCIEPHY 214 +#define CK_BUS_STGEN 215 +#define CK_BUS_VDEC 216 +#define CK_BUS_VENC 217 +#define CK_SYSDBG 218 +#define CK_KER_TIM2 219 +#define CK_KER_TIM3 220 +#define CK_KER_TIM4 221 +#define CK_KER_TIM5 222 +#define CK_KER_TIM6 223 +#define CK_KER_TIM7 224 +#define CK_KER_TIM10 225 +#define CK_KER_TIM11 226 +#define CK_KER_TIM12 227 +#define CK_KER_TIM13 228 +#define CK_KER_TIM14 229 +#define CK_KER_TIM1 230 +#define CK_KER_TIM8 231 +#define CK_KER_TIM15 232 +#define CK_KER_TIM16 233 +#define CK_KER_TIM17 234 +#define CK_KER_TIM20 235 +#define CK_BUS_SYSRAM 236 +#define CK_BUS_VDERAM 237 +#define CK_BUS_RETRAM 238 +#define CK_BUS_OSPI1 239 +#define CK_BUS_OSPI2 240 +#define CK_BUS_OTFD1 241 +#define CK_BUS_OTFD2 242 +#define CK_BUS_SRAM1 243 +#define CK_BUS_SRAM2 244 +#define CK_BUS_SDMMC1 245 +#define CK_BUS_SDMMC2 246 +#define CK_BUS_SDMMC3 247 +#define CK_BUS_DDR 248 +#define CK_BUS_RISAF4 249 +#define CK_BUS_USB2OHCI 250 +#define CK_BUS_USB2EHCI 251 +#define CK_BUS_USB3DR 252 +#define CK_KER_LPTIM1 253 +#define CK_KER_LPTIM2 254 +#define CK_KER_USART2 255 +#define CK_KER_UART4 256 +#define CK_KER_USART3 257 +#define CK_KER_UART5 258 +#define CK_KER_SPI2 259 +#define CK_KER_SPI3 260 +#define CK_KER_SPDIFRX 261 +#define CK_KER_I2C1 262 +#define CK_KER_I2C2 263 +#define CK_KER_I3C1 264 +#define CK_KER_I3C2 265 +#define CK_KER_I2C3 266 +#define CK_KER_I2C5 267 +#define CK_KER_I3C3 268 +#define CK_KER_I2C4 269 +#define CK_KER_I2C6 270 +#define CK_KER_I2C7 271 +#define CK_KER_SPI1 272 +#define CK_KER_SPI4 273 +#define CK_KER_SPI5 274 +#define CK_KER_SPI6 275 +#define CK_KER_SPI7 276 +#define CK_KER_USART1 277 +#define CK_KER_USART6 278 +#define CK_KER_UART7 279 +#define CK_KER_UART8 280 +#define CK_KER_UART9 281 +#define CK_KER_MDF1 282 +#define CK_KER_SAI1 283 +#define CK_KER_SAI2 284 +#define CK_KER_SAI3 285 +#define CK_KER_SAI4 286 +#define CK_KER_FDCAN 287 +#define CK_KER_DSIBLANE 288 +#define CK_KER_DSIPHY 289 +#define CK_KER_CSI 290 +#define CK_KER_CSITXESC 291 +#define CK_KER_CSIPHY 292 +#define CK_KER_LVDSPHY 293 +#define CK_KER_STGEN 294 +#define CK_KER_USB3PCIEPHY 295 +#define CK_KER_USB2PHY2EN 296 +#define CK_KER_I3C4 297 +#define CK_KER_SPI8 298 +#define CK_KER_I2C8 299 +#define CK_KER_LPUART1 300 +#define CK_KER_LPTIM3 301 +#define CK_KER_LPTIM4 302 +#define CK_KER_LPTIM5 303 +#define CK_KER_TSDBG 304 +#define CK_KER_TPIU 305 +#define CK_BUS_ETR 306 +#define CK_BUS_SYSATB 307 +#define CK_KER_ADC12 308 +#define CK_KER_ADC3 309 +#define CK_KER_OSPI1 310 +#define CK_KER_OSPI2 311 +#define CK_KER_FMC 312 +#define CK_KER_SDMMC1 313 +#define CK_KER_SDMMC2 314 +#define CK_KER_SDMMC3 315 +#define CK_KER_ETH1 316 +#define CK_KER_ETH2 317 +#define CK_KER_ETH1PTP 318 +#define CK_KER_ETH2PTP 319 +#define 
CK_KER_USB2PHY1 320 +#define CK_KER_USB2PHY2 321 +#define CK_KER_ETHSW 322 +#define CK_KER_ETHSWREF 323 +#define CK_MCO1 324 +#define CK_MCO2 325 +#define CK_KER_DTS 326 +#define CK_ETH1_RX 327 +#define CK_ETH1_TX 328 +#define CK_ETH1_MAC 329 +#define CK_ETH2_RX 330 +#define CK_ETH2_TX 331 +#define CK_ETH2_MAC 332 +#define CK_ETH1_STP 333 +#define CK_ETH2_STP 334 +#define CK_KER_USBTC 335 +#define CK_BUS_ADF1 336 +#define CK_KER_ADF1 337 +#define CK_BUS_LVDS 338 +#define CK_KER_LTDC 339 +#define CK_KER_GPU 340 +#define CK_BUS_ETHSWACMCFG 341 +#define CK_BUS_ETHSWACMMSG 342 +#define HSE_DIV2_CK 343 + +#define STM32MP25_LAST_CLK 344 + +#define CK_SCMI_ICN_HS_MCU 0 +#define CK_SCMI_ICN_SDMMC 1 +#define CK_SCMI_ICN_DDR 2 +#define CK_SCMI_ICN_DISPLAY 3 +#define CK_SCMI_ICN_HSL 4 +#define CK_SCMI_ICN_NIC 5 +#define CK_SCMI_ICN_VID 6 +#define CK_SCMI_FLEXGEN_07 7 +#define CK_SCMI_FLEXGEN_08 8 +#define CK_SCMI_FLEXGEN_09 9 +#define CK_SCMI_FLEXGEN_10 10 +#define CK_SCMI_FLEXGEN_11 11 +#define CK_SCMI_FLEXGEN_12 12 +#define CK_SCMI_FLEXGEN_13 13 +#define CK_SCMI_FLEXGEN_14 14 +#define CK_SCMI_FLEXGEN_15 15 +#define CK_SCMI_FLEXGEN_16 16 +#define CK_SCMI_FLEXGEN_17 17 +#define CK_SCMI_FLEXGEN_18 18 +#define CK_SCMI_FLEXGEN_19 19 +#define CK_SCMI_FLEXGEN_20 20 +#define CK_SCMI_FLEXGEN_21 21 +#define CK_SCMI_FLEXGEN_22 22 +#define CK_SCMI_FLEXGEN_23 23 +#define CK_SCMI_FLEXGEN_24 24 +#define CK_SCMI_FLEXGEN_25 25 +#define CK_SCMI_FLEXGEN_26 26 +#define CK_SCMI_FLEXGEN_27 27 +#define CK_SCMI_FLEXGEN_28 28 +#define CK_SCMI_FLEXGEN_29 29 +#define CK_SCMI_FLEXGEN_30 30 +#define CK_SCMI_FLEXGEN_31 31 +#define CK_SCMI_FLEXGEN_32 32 +#define CK_SCMI_FLEXGEN_33 33 +#define CK_SCMI_FLEXGEN_34 34 +#define CK_SCMI_FLEXGEN_35 35 +#define CK_SCMI_FLEXGEN_36 36 +#define CK_SCMI_FLEXGEN_37 37 +#define CK_SCMI_FLEXGEN_38 38 +#define CK_SCMI_FLEXGEN_39 39 +#define CK_SCMI_FLEXGEN_40 40 +#define CK_SCMI_FLEXGEN_41 41 +#define CK_SCMI_FLEXGEN_42 42 +#define CK_SCMI_FLEXGEN_43 43 +#define CK_SCMI_FLEXGEN_44 44 +#define CK_SCMI_FLEXGEN_45 45 +#define CK_SCMI_FLEXGEN_46 46 +#define CK_SCMI_FLEXGEN_47 47 +#define CK_SCMI_FLEXGEN_48 48 +#define CK_SCMI_FLEXGEN_49 49 +#define CK_SCMI_FLEXGEN_50 50 +#define CK_SCMI_FLEXGEN_51 51 +#define CK_SCMI_FLEXGEN_52 52 +#define CK_SCMI_FLEXGEN_53 53 +#define CK_SCMI_FLEXGEN_54 54 +#define CK_SCMI_FLEXGEN_55 55 +#define CK_SCMI_FLEXGEN_56 56 +#define CK_SCMI_FLEXGEN_57 57 +#define CK_SCMI_FLEXGEN_58 58 +#define CK_SCMI_FLEXGEN_59 59 +#define CK_SCMI_FLEXGEN_60 60 +#define CK_SCMI_FLEXGEN_61 61 +#define CK_SCMI_FLEXGEN_62 62 +#define CK_SCMI_FLEXGEN_63 63 +#define CK_SCMI_ICN_LS_MCU 64 +#define CK_SCMI_HSE 65 +#define CK_SCMI_LSE 66 +#define CK_SCMI_HSI 67 +#define CK_SCMI_LSI 68 +#define CK_SCMI_MSI 69 +#define CK_SCMI_HSE_DIV2 70 +#define CK_SCMI_CPU1 71 +#define CK_SCMI_SYSCPU1 72 +#define CK_SCMI_PLL2 73 +#define CK_SCMI_PLL3 74 +#define CK_SCMI_RTC 75 +#define CK_SCMI_RTCCK 76 +#define CK_SCMI_ICN_APB1 77 +#define CK_SCMI_ICN_APB2 78 +#define CK_SCMI_ICN_APB3 79 +#define CK_SCMI_ICN_APB4 80 +#define CK_SCMI_ICN_APBDBG 81 +#define CK_SCMI_TIMG1 82 +#define CK_SCMI_TIMG2 83 +#define CK_SCMI_BKPSRAM 84 +#define CK_SCMI_BSEC 85 +#define CK_SCMI_ETR 87 +#define CK_SCMI_FMC 88 +#define CK_SCMI_GPIOA 89 +#define CK_SCMI_GPIOB 90 +#define CK_SCMI_GPIOC 91 +#define CK_SCMI_GPIOD 92 +#define CK_SCMI_GPIOE 93 +#define CK_SCMI_GPIOF 94 +#define CK_SCMI_GPIOG 95 +#define CK_SCMI_GPIOH 96 +#define CK_SCMI_GPIOI 97 +#define CK_SCMI_GPIOJ 98 +#define CK_SCMI_GPIOK 99 +#define CK_SCMI_GPIOZ 100 
+#define CK_SCMI_HPDMA1 101 +#define CK_SCMI_HPDMA2 102 +#define CK_SCMI_HPDMA3 103 +#define CK_SCMI_HSEM 104 +#define CK_SCMI_IPCC1 105 +#define CK_SCMI_IPCC2 106 +#define CK_SCMI_LPDMA 107 +#define CK_SCMI_RETRAM 108 +#define CK_SCMI_SRAM1 109 +#define CK_SCMI_SRAM2 110 +#define CK_SCMI_LPSRAM1 111 +#define CK_SCMI_LPSRAM2 112 +#define CK_SCMI_LPSRAM3 113 +#define CK_SCMI_VDERAM 114 +#define CK_SCMI_SYSRAM 115 +#define CK_SCMI_OSPI1 116 +#define CK_SCMI_OSPI2 117 +#define CK_SCMI_TPIU 118 +#define CK_SCMI_SYSDBG 119 +#define CK_SCMI_SYSATB 120 +#define CK_SCMI_TSDBG 121 +#define CK_SCMI_STM500 122 + +#endif /* _DT_BINDINGS_STM32MP25_CLKS_H_ */ diff --git a/include/dt-bindings/dma/fsl-edma.h b/include/dt-bindings/dma/fsl-edma.h new file mode 100644 index 000000000000..fd11478cfe9c --- /dev/null +++ b/include/dt-bindings/dma/fsl-edma.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ + +#ifndef _FSL_EDMA_DT_BINDING_H_ +#define _FSL_EDMA_DT_BINDING_H_ + +/* Receive Channel */ +#define FSL_EDMA_RX 0x1 + +/* iMX8 audio remote DMA */ +#define FSL_EDMA_REMOTE 0x2 + +/* FIFO is a contiguous memory region */ +#define FSL_EDMA_MULTI_FIFO 0x4 + +/* Channel needs to stick to an even channel */ +#define FSL_EDMA_EVEN_CH 0x8 + +/* Channel needs to stick to an odd channel */ +#define FSL_EDMA_ODD_CH 0x10 + +#endif
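The FSL_EDMA_* values above are single-bit flags, so a consumer can OR them together in the flags cell of its dma specifier. A minimal DTS sketch of that usage follows; the edma1 label, channel numbers and three-cell specifier layout are assumptions modelled on typical i.MX eDMA consumers, not something this patch defines:

#include <dt-bindings/dma/fsl-edma.h>

serial@42570000 {
	/* hypothetical consumer: RX channel carries the FSL_EDMA_RX request flag, TX carries none */
	dmas = <&edma1 17 0 FSL_EDMA_RX>,
	       <&edma1 16 0 0>;
	dma-names = "rx", "tx";
};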
diff --git a/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h new file mode 100644 index 000000000000..4e16d31a71c9 --- /dev/null +++ b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2023 Amlogic, Inc. All rights reserved. + * Author: Huqiang Qin <huqiang.qin@amlogic.com> + */ + +#ifndef _DT_BINDINGS_AMLOGIC_T7_GPIO_H +#define _DT_BINDINGS_AMLOGIC_T7_GPIO_H + +#define GPIOB_0 0 +#define GPIOB_1 1 +#define GPIOB_2 2 +#define GPIOB_3 3 +#define GPIOB_4 4 +#define GPIOB_5 5 +#define GPIOB_6 6 +#define GPIOB_7 7 +#define GPIOB_8 8 +#define GPIOB_9 9 +#define GPIOB_10 10 +#define GPIOB_11 11 +#define GPIOB_12 12 + +#define GPIOC_0 13 +#define GPIOC_1 14 +#define GPIOC_2 15 +#define GPIOC_3 16 +#define GPIOC_4 17 +#define GPIOC_5 18 +#define GPIOC_6 19 + +#define GPIOX_0 20 +#define GPIOX_1 21 +#define GPIOX_2 22 +#define GPIOX_3 23 +#define GPIOX_4 24 +#define GPIOX_5 25 +#define GPIOX_6 26 +#define GPIOX_7 27 +#define GPIOX_8 28 +#define GPIOX_9 29 +#define GPIOX_10 30 +#define GPIOX_11 31 +#define GPIOX_12 32 +#define GPIOX_13 33 +#define GPIOX_14 34 +#define GPIOX_15 35 +#define GPIOX_16 36 +#define GPIOX_17 37 +#define GPIOX_18 38 +#define GPIOX_19 39 + +#define GPIOW_0 40 +#define GPIOW_1 41 +#define GPIOW_2 42 +#define GPIOW_3 43 +#define GPIOW_4 44 +#define GPIOW_5 45 +#define GPIOW_6 46 +#define GPIOW_7 47 +#define GPIOW_8 48 +#define GPIOW_9 49 +#define GPIOW_10 50 +#define GPIOW_11 51 +#define GPIOW_12 52 +#define GPIOW_13 53 +#define GPIOW_14 54 +#define GPIOW_15 55 +#define GPIOW_16 56 + +#define GPIOD_0 57 +#define GPIOD_1 58 +#define GPIOD_2 59 +#define GPIOD_3 60 +#define GPIOD_4 61 +#define GPIOD_5 62 +#define GPIOD_6 63 +#define GPIOD_7 64 +#define GPIOD_8 65 +#define GPIOD_9 66 +#define GPIOD_10 67 +#define GPIOD_11 68 +#define GPIOD_12 69 + +#define GPIOE_0 70 +#define GPIOE_1 71 +#define GPIOE_2 72 +#define GPIOE_3 73 +#define GPIOE_4 74 +#define GPIOE_5 75 +#define GPIOE_6 76 + +#define GPIOZ_0 77 +#define GPIOZ_1 78 +#define GPIOZ_2 79 +#define GPIOZ_3 80 +#define GPIOZ_4 81 +#define GPIOZ_5 82 +#define GPIOZ_6 83 +#define GPIOZ_7 84 +#define GPIOZ_8 85 +#define GPIOZ_9 86 +#define GPIOZ_10 87 +#define GPIOZ_11 88 +#define GPIOZ_12 89 +#define GPIOZ_13 90 + +#define GPIOT_0 91 +#define GPIOT_1 92 +#define GPIOT_2 93 +#define GPIOT_3 94 +#define GPIOT_4 95 +#define GPIOT_5 96 +#define GPIOT_6 97 +#define GPIOT_7 98 +#define GPIOT_8 99 +#define GPIOT_9 100 +#define GPIOT_10 101 +#define GPIOT_11 102 +#define GPIOT_12 103 +#define GPIOT_13 104 +#define GPIOT_14 105 +#define GPIOT_15 106 +#define GPIOT_16 107 +#define GPIOT_17 108 +#define GPIOT_18 109 +#define GPIOT_19 110 +#define GPIOT_20 111 +#define GPIOT_21 112 +#define GPIOT_22 113 +#define GPIOT_23 114 + +#define GPIOM_0 115 +#define GPIOM_1 116 +#define GPIOM_2 117 +#define GPIOM_3 118 +#define GPIOM_4 119 +#define GPIOM_5 120 +#define GPIOM_6 121 +#define GPIOM_7 122 +#define GPIOM_8 123 +#define GPIOM_9 124 +#define GPIOM_10 125 +#define GPIOM_11 126 +#define GPIOM_12 127 +#define GPIOM_13 128 + +#define GPIOY_0 129 +#define GPIOY_1 130 +#define GPIOY_2 131 +#define GPIOY_3 132 +#define GPIOY_4 133 +#define GPIOY_5 134 +#define GPIOY_6 135 +#define GPIOY_7 136 +#define GPIOY_8 137 +#define GPIOY_9 138 +#define GPIOY_10 139 +#define GPIOY_11 140 +#define GPIOY_12 141 +#define GPIOY_13 142 +#define GPIOY_14 143 +#define GPIOY_15 144 +#define GPIOY_16 145 +#define GPIOY_17 146 +#define GPIOY_18 147 + +#define GPIOH_0 148 +#define GPIOH_1 149 +#define GPIOH_2 150 +#define GPIOH_3 151 +#define GPIOH_4 152 +#define GPIOH_5 153 +#define GPIOH_6 154 +#define GPIOH_7 155 + +#define GPIO_TEST_N 156 + +#endif /* _DT_BINDINGS_AMLOGIC_T7_GPIO_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h new file mode 100644 index 000000000000..96908014e09e --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H + +#ifndef PM7325_SID +#define PM7325_SID 1 +#endif + +#include <dt-bindings/iio/qcom,spmi-vadc.h> + +/* ADC channels for PM7325_ADC for PMIC7 */ +#define PM7325_ADC7_REF_GND (PM7325_SID << 8 | ADC7_REF_GND) +#define PM7325_ADC7_1P25VREF (PM7325_SID << 8 | ADC7_1P25VREF) +#define PM7325_ADC7_VREF_VADC (PM7325_SID << 8 | ADC7_VREF_VADC) +#define PM7325_ADC7_DIE_TEMP (PM7325_SID << 8 | ADC7_DIE_TEMP) + +#define PM7325_ADC7_AMUX_THM1 (PM7325_SID << 8 | ADC7_AMUX_THM1) +#define PM7325_ADC7_AMUX_THM2 (PM7325_SID << 8 | ADC7_AMUX_THM2) +#define PM7325_ADC7_AMUX_THM3 (PM7325_SID << 8 | ADC7_AMUX_THM3) +#define PM7325_ADC7_AMUX_THM4 (PM7325_SID << 8 | ADC7_AMUX_THM4) +#define PM7325_ADC7_AMUX_THM5 (PM7325_SID << 8 | ADC7_AMUX_THM5) +#define PM7325_ADC7_GPIO1 (PM7325_SID << 8 | ADC7_GPIO1) +#define PM7325_ADC7_GPIO2 (PM7325_SID << 8 | ADC7_GPIO2) +#define PM7325_ADC7_GPIO3 (PM7325_SID << 8 | ADC7_GPIO3) +#define PM7325_ADC7_GPIO4 (PM7325_SID << 8 | ADC7_GPIO4) + +/* 30k pull-up1 */ +#define PM7325_ADC7_AMUX_THM1_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_30K_PU) +#define PM7325_ADC7_AMUX_THM2_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_30K_PU) +#define PM7325_ADC7_AMUX_THM3_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_30K_PU) +#define PM7325_ADC7_AMUX_THM4_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_30K_PU) +#define PM7325_ADC7_AMUX_THM5_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_30K_PU) +#define PM7325_ADC7_GPIO1_30K_PU (PM7325_SID << 8 | ADC7_GPIO1_30K_PU) +#define PM7325_ADC7_GPIO2_30K_PU (PM7325_SID << 8 | ADC7_GPIO2_30K_PU) +#define PM7325_ADC7_GPIO3_30K_PU (PM7325_SID << 8 | ADC7_GPIO3_30K_PU) +#define PM7325_ADC7_GPIO4_30K_PU (PM7325_SID << 8 | ADC7_GPIO4_30K_PU) + +/* 100k pull-up2 */ +#define PM7325_ADC7_AMUX_THM1_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_100K_PU) +#define PM7325_ADC7_AMUX_THM2_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_100K_PU) +#define PM7325_ADC7_AMUX_THM3_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_100K_PU) +#define PM7325_ADC7_AMUX_THM4_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_100K_PU) +#define PM7325_ADC7_AMUX_THM5_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_100K_PU) +#define PM7325_ADC7_GPIO1_100K_PU (PM7325_SID << 8 | ADC7_GPIO1_100K_PU) +#define PM7325_ADC7_GPIO2_100K_PU (PM7325_SID << 8 | ADC7_GPIO2_100K_PU) +#define PM7325_ADC7_GPIO3_100K_PU (PM7325_SID << 8 | ADC7_GPIO3_100K_PU) +#define PM7325_ADC7_GPIO4_100K_PU (PM7325_SID << 8 | ADC7_GPIO4_100K_PU) + +/* 400k pull-up3 */ +#define PM7325_ADC7_AMUX_THM1_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_400K_PU) +#define PM7325_ADC7_AMUX_THM2_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_400K_PU) +#define PM7325_ADC7_AMUX_THM3_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_400K_PU) +#define PM7325_ADC7_AMUX_THM4_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_400K_PU) +#define PM7325_ADC7_AMUX_THM5_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_400K_PU) +#define PM7325_ADC7_GPIO1_400K_PU (PM7325_SID << 8 | ADC7_GPIO1_400K_PU) +#define PM7325_ADC7_GPIO2_400K_PU (PM7325_SID << 8 | ADC7_GPIO2_400K_PU) +#define PM7325_ADC7_GPIO3_400K_PU (PM7325_SID << 8 | ADC7_GPIO3_400K_PU) +#define PM7325_ADC7_GPIO4_400K_PU (PM7325_SID << 8 | ADC7_GPIO4_400K_PU) + +/* 1/3 Divider */ +#define PM7325_ADC7_GPIO4_DIV3 (PM7325_SID << 8 | ADC7_GPIO4_DIV3) + +#define PM7325_ADC7_VPH_PWR (PM7325_SID << 8 | ADC7_VPH_PWR) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H */
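Each PM7325_* channel ID above packs the PMIC's SPMI slave ID into the high byte and the PMIC7 ADC channel from qcom,spmi-vadc.h into the low byte; PM7325_SID defaults to 1 through the #ifndef guard, so a DTS can redefine it before the include if the PMIC answers at a different SID. A hedged sketch of a consumer, where the pmk8350_vadc node and label are illustrative and not part of this patch: with the default SID, PM7325_ADC7_GPIO3 expands to (1 << 8 | 0x0c) = 0x10c.

#include <dt-bindings/iio/qcom,spmi-adc7-pm7325.h>

&pmk8350_vadc {
	/* PM7325 GPIO3 input; SID 1 is encoded in the high byte of reg */
	channel@10c {
		reg = <PM7325_ADC7_GPIO3>;
		label = "pm7325_gpio3";
	};
};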
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h new file mode 100644 index 000000000000..c0680d1285cf --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H + +#include <dt-bindings/iio/qcom,spmi-vadc.h> + +#define SMB139x_1_ADC7_SMB_TEMP (SMB139x_1_SID << 8 | ADC7_SMB_TEMP) +#define SMB139x_1_ADC7_ICHG_SMB (SMB139x_1_SID << 8 | ADC7_ICHG_SMB) +#define SMB139x_1_ADC7_IIN_SMB (SMB139x_1_SID << 8 | ADC7_IIN_SMB) + +#define SMB139x_2_ADC7_SMB_TEMP (SMB139x_2_SID << 8 | ADC7_SMB_TEMP) +#define SMB139x_2_ADC7_ICHG_SMB (SMB139x_2_SID << 8 | ADC7_ICHG_SMB) +#define SMB139x_2_ADC7_IIN_SMB (SMB139x_2_SID << 8 | ADC7_IIN_SMB) + +#endif diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h index 08adfe25964c..ef07ecd4d585 100644 --- a/include/dt-bindings/iio/qcom,spmi-vadc.h +++ b/include/dt-bindings/iio/qcom,spmi-vadc.h @@ -239,12 +239,15 @@ #define ADC7_GPIO3 0x0c #define ADC7_GPIO4 0x0d +#define ADC7_SMB_TEMP 0x06 #define ADC7_CHG_TEMP 0x10 #define ADC7_USB_IN_V_16 0x11 #define ADC7_VDC_16 0x12 #define ADC7_CC1_ID 0x13 #define ADC7_VREF_BAT_THERM 0x15 #define ADC7_IIN_FB 0x17 +#define ADC7_ICHG_SMB 0x18 +#define ADC7_IIN_SMB 0x19 /* 30k pull-up1 */ #define ADC7_AMUX_THM1_30K_PU 0x24 diff --git a/include/dt-bindings/interconnect/qcom,sdx75.h b/include/dt-bindings/interconnect/qcom,sdx75.h new file mode 100644 index 000000000000..e903f5f3dd8f --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sdx75.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H + +#define MASTER_QPIC_CORE 0 +#define MASTER_QUP_CORE_0 1 +#define SLAVE_QPIC_CORE 2 +#define SLAVE_QUP_CORE_0 3 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LAGG_CFG 1 +#define SLAVE_MCCC_MASTER 2 +#define SLAVE_GEM_NOC_CFG 3 +#define SLAVE_SNOOP_BWMON 4 + +#define MASTER_SYS_TCU 0 +#define MASTER_APPSS_PROC 1 +#define MASTER_GEM_NOC_CFG 2 +#define MASTER_MSS_PROC 3 +#define MASTER_ANOC_PCIE_GEM_NOC 4 +#define MASTER_SNOC_SF_MEM_NOC 5 +#define MASTER_GIC 6 +#define MASTER_IPA_PCIE 7 +#define SLAVE_GEM_NOC_CNOC 8 +#define SLAVE_LLCC 9 +#define SLAVE_MEM_NOC_PCIE_SNOC 10 +#define SLAVE_SERVICE_GEM_NOC 11 + +#define MASTER_PCIE_0 0 +#define MASTER_PCIE_1 1 +#define MASTER_PCIE_2 2 +#define SLAVE_ANOC_PCIE_GEM_NOC 3 + +#define MASTER_AUDIO 0 +#define MASTER_GIC_AHB 1 +#define MASTER_PCIE_RSCC 2 +#define MASTER_QDSS_BAM 3 +#define MASTER_QPIC 4 +#define MASTER_QUP_0 5 +#define MASTER_ANOC_SNOC 6 +#define MASTER_GEM_NOC_CNOC 7 +#define MASTER_GEM_NOC_PCIE_SNOC 8 +#define MASTER_SNOC_CFG 9 +#define MASTER_PCIE_ANOC_CFG 10 +#define MASTER_CRYPTO 11 +#define MASTER_IPA 12 +#define MASTER_MVMSS 13 +#define MASTER_EMAC_0 14 +#define MASTER_EMAC_1 15 +#define MASTER_QDSS_ETR 16 +#define MASTER_QDSS_ETR_1 17 +#define MASTER_SDCC_1 18 +#define MASTER_SDCC_4 19 +#define MASTER_USB3_0 20 +#define SLAVE_ETH0_CFG 21 +#define SLAVE_ETH1_CFG 22 +#define SLAVE_AUDIO 23 +#define SLAVE_CLK_CTL 24 +#define SLAVE_CRYPTO_0_CFG 25 +#define SLAVE_IMEM_CFG 26 +#define SLAVE_IPA_CFG 27 +#define SLAVE_IPC_ROUTER_CFG 28 +#define SLAVE_CNOC_MSS 29 +#define SLAVE_ICBDI_MVMSS_CFG 30 +#define SLAVE_PCIE_0_CFG 31 +#define SLAVE_PCIE_1_CFG 32 +#define SLAVE_PCIE_2_CFG 33 +#define SLAVE_PCIE_RSC_CFG 34 +#define SLAVE_PDM 35 +#define SLAVE_PRNG 36 +#define SLAVE_QDSS_CFG 37 +#define SLAVE_QPIC 38 +#define SLAVE_QUP_0 39 +#define SLAVE_SDCC_1 40 +#define SLAVE_SDCC_4 41 +#define SLAVE_SPMI_VGI_COEX 42 +#define SLAVE_TCSR 43 +#define SLAVE_TLMM 44 +#define SLAVE_USB3 45 +#define SLAVE_USB3_PHY_CFG 46 +#define SLAVE_A1NOC_CFG 47 +#define SLAVE_DDRSS_CFG 48 +#define SLAVE_SNOC_GEM_NOC_SF 49 +#define SLAVE_SNOC_CFG 50 +#define SLAVE_PCIE_ANOC_CFG 51 +#define SLAVE_IMEM 52 +#define SLAVE_SERVICE_PCIE_ANOC 53 +#define SLAVE_SERVICE_SNOC 54 +#define SLAVE_PCIE_0 55 +#define SLAVE_PCIE_1 56 +#define SLAVE_PCIE_2 57 +#define SLAVE_QDSS_STM 58 +#define SLAVE_TCU 59 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sm6115.h b/include/dt-bindings/interconnect/qcom,sm6115.h new file mode 100644 index 000000000000..21090e585f05 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sm6115.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
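Note that these SDX75 IDs restart from 0 for every interconnect provider (QUP virtual, MC virtual, DC NoC, GEM NoC, PCIe ANoC, system NoC), so an endpoint is only meaningful as a (provider phandle, ID) pair. A hedged consumer sketch; the provider labels, the node and the trailing tag cells are assumptions for illustration only:

#include <dt-bindings/interconnect/qcom,sdx75.h>

sdhc@8804000 {
	interconnects = <&system_noc MASTER_SDCC_1 0 &mc_virt SLAVE_EBI1 0>,
			<&gem_noc MASTER_APPSS_PROC 0 &system_noc SLAVE_SDCC_1 0>;
	interconnect-names = "sdhc-ddr", "cpu-sdhc";
};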
+ * Copyright (c) 2023, Linaro Limited + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H + +/* BIMC */ +#define MASTER_AMPSS_M0 0 +#define MASTER_SNOC_BIMC_RT 1 +#define MASTER_SNOC_BIMC_NRT 2 +#define SNOC_BIMC_MAS 3 +#define MASTER_GRAPHICS_3D 4 +#define MASTER_TCU_0 5 +#define SLAVE_EBI_CH0 6 +#define BIMC_SNOC_SLV 7 + +/* CNOC */ +#define SNOC_CNOC_MAS 0 +#define MASTER_QDSS_DAP 1 +#define SLAVE_AHB2PHY_USB 2 +#define SLAVE_APSS_THROTTLE_CFG 3 +#define SLAVE_BIMC_CFG 4 +#define SLAVE_BOOT_ROM 5 +#define SLAVE_CAMERA_NRT_THROTTLE_CFG 6 +#define SLAVE_CAMERA_RT_THROTTLE_CFG 7 +#define SLAVE_CAMERA_CFG 8 +#define SLAVE_CLK_CTL 9 +#define SLAVE_RBCPR_CX_CFG 10 +#define SLAVE_RBCPR_MX_CFG 11 +#define SLAVE_CRYPTO_0_CFG 12 +#define SLAVE_DCC_CFG 13 +#define SLAVE_DDR_PHY_CFG 14 +#define SLAVE_DDR_SS_CFG 15 +#define SLAVE_DISPLAY_CFG 16 +#define SLAVE_DISPLAY_THROTTLE_CFG 17 +#define SLAVE_GPU_CFG 18 +#define SLAVE_GPU_THROTTLE_CFG 19 +#define SLAVE_HWKM_CORE 20 +#define SLAVE_IMEM_CFG 21 +#define SLAVE_IPA_CFG 22 +#define SLAVE_LPASS 23 +#define SLAVE_MAPSS 24 +#define SLAVE_MDSP_MPU_CFG 25 +#define SLAVE_MESSAGE_RAM 26 +#define SLAVE_CNOC_MSS 27 +#define SLAVE_PDM 28 +#define SLAVE_PIMEM_CFG 29 +#define SLAVE_PKA_CORE 30 +#define SLAVE_PMIC_ARB 31 +#define SLAVE_QDSS_CFG 32 +#define SLAVE_QM_CFG 33 +#define SLAVE_QM_MPU_CFG 34 +#define SLAVE_QPIC 35 +#define SLAVE_QUP_0 36 +#define SLAVE_RPM 37 +#define SLAVE_SDCC_1 38 +#define SLAVE_SDCC_2 39 +#define SLAVE_SECURITY 40 +#define SLAVE_SNOC_CFG 41 +#define SLAVE_TCSR 42 +#define SLAVE_TLMM 43 +#define SLAVE_USB3 44 +#define SLAVE_VENUS_CFG 45 +#define SLAVE_VENUS_THROTTLE_CFG 46 +#define SLAVE_VSENSE_CTRL_CFG 47 +#define SLAVE_SERVICE_CNOC 48 + +/* SNOC */ +#define MASTER_CRYPTO_CORE0 0 +#define MASTER_SNOC_CFG 1 +#define MASTER_TIC 2 +#define MASTER_ANOC_SNOC 3 +#define BIMC_SNOC_MAS 4 +#define MASTER_PIMEM 5 +#define MASTER_QDSS_BAM 6 +#define MASTER_QPIC 7 +#define MASTER_QUP_0 8 +#define MASTER_IPA 9 +#define MASTER_QDSS_ETR 10 +#define MASTER_SDCC_1 11 +#define MASTER_SDCC_2 12 +#define MASTER_USB3 13 +#define SLAVE_APPSS 14 +#define SNOC_CNOC_SLV 15 +#define SLAVE_OCIMEM 16 +#define SLAVE_PIMEM 17 +#define SNOC_BIMC_SLV 18 +#define SLAVE_SERVICE_SNOC 19 +#define SLAVE_QDSS_STM 20 +#define SLAVE_TCU 21 +#define SLAVE_ANOC_SNOC 22 + +/* CLK Virtual */ +#define MASTER_QUP_CORE_0 0 +#define SLAVE_QUP_CORE_0 1 + +/* MMRT Virtual */ +#define MASTER_CAMNOC_HF 0 +#define MASTER_MDP_PORT0 1 +#define SLAVE_SNOC_BIMC_RT 2 + +/* MMNRT Virtual */ +#define MASTER_CAMNOC_SF 0 +#define MASTER_VIDEO_P0 1 +#define MASTER_VIDEO_PROC 2 +#define SLAVE_SNOC_BIMC_NRT 3 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h new file mode 100644 index 000000000000..6c1eaf04e241 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2023, Linaro Limited + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H + +#define MASTER_QSPI_0 0 +#define MASTER_QUP_1 1 +#define MASTER_QUP_3 2 +#define MASTER_SDCC_4 3 +#define MASTER_UFS_MEM 4 +#define MASTER_USB3_0 5 +#define SLAVE_A1NOC_SNOC 6 + +#define MASTER_QDSS_BAM 0 +#define MASTER_QUP_2 1 +#define MASTER_CRYPTO 2 +#define MASTER_IPA 3 +#define MASTER_SP 4 +#define MASTER_QDSS_ETR 5 +#define MASTER_QDSS_ETR_1 6 +#define MASTER_SDCC_2 7 +#define SLAVE_A2NOC_SNOC 8 + +#define MASTER_QUP_CORE_0 0 +#define MASTER_QUP_CORE_1 1 +#define MASTER_QUP_CORE_2 2 +#define SLAVE_QUP_CORE_0 3 +#define SLAVE_QUP_CORE_1 4 +#define SLAVE_QUP_CORE_2 5 + +#define MASTER_CNOC_CFG 0 +#define SLAVE_AHB2PHY_SOUTH 1 +#define SLAVE_AHB2PHY_NORTH 2 +#define SLAVE_CAMERA_CFG 3 +#define SLAVE_CLK_CTL 4 +#define SLAVE_RBCPR_CX_CFG 5 +#define SLAVE_CPR_HMX 6 +#define SLAVE_RBCPR_MMCX_CFG 7 +#define SLAVE_RBCPR_MXA_CFG 8 +#define SLAVE_RBCPR_MXC_CFG 9 +#define SLAVE_CPR_NSPCX 10 +#define SLAVE_CRYPTO_0_CFG 11 +#define SLAVE_CX_RDPM 12 +#define SLAVE_DISPLAY_CFG 13 +#define SLAVE_GFX3D_CFG 14 +#define SLAVE_I2C 15 +#define SLAVE_I3C_IBI0_CFG 16 +#define SLAVE_I3C_IBI1_CFG 17 +#define SLAVE_IMEM_CFG 18 +#define SLAVE_CNOC_MSS 19 +#define SLAVE_MX_2_RDPM 20 +#define SLAVE_MX_RDPM 21 +#define SLAVE_PCIE_0_CFG 22 +#define SLAVE_PCIE_1_CFG 23 +#define SLAVE_PCIE_RSCC 24 +#define SLAVE_PDM 25 +#define SLAVE_PRNG 26 +#define SLAVE_QDSS_CFG 27 +#define SLAVE_QSPI_0 28 +#define SLAVE_QUP_3 29 +#define SLAVE_QUP_1 30 +#define SLAVE_QUP_2 31 +#define SLAVE_SDCC_2 32 +#define SLAVE_SDCC_4 33 +#define SLAVE_SPSS_CFG 34 +#define SLAVE_TCSR 35 +#define SLAVE_TLMM 36 +#define SLAVE_UFS_MEM_CFG 37 +#define SLAVE_USB3_0 38 +#define SLAVE_VENUS_CFG 39 +#define SLAVE_VSENSE_CTRL_CFG 40 +#define SLAVE_CNOC_MNOC_CFG 41 +#define SLAVE_NSP_QTB_CFG 42 +#define SLAVE_PCIE_ANOC_CFG 43 +#define SLAVE_SERVICE_CNOC_CFG 44 +#define SLAVE_QDSS_STM 45 +#define SLAVE_TCU 46 + +#define MASTER_GEM_NOC_CNOC 0 +#define MASTER_GEM_NOC_PCIE_SNOC 1 +#define SLAVE_AOSS 2 +#define SLAVE_IPA_CFG 3 +#define SLAVE_IPC_ROUTER_CFG 4 +#define SLAVE_TME_CFG 5 +#define SLAVE_APPSS 6 +#define SLAVE_CNOC_CFG 7 +#define SLAVE_DDRSS_CFG 8 +#define SLAVE_IMEM 9 +#define SLAVE_SERVICE_CNOC 10 +#define SLAVE_PCIE_0 11 +#define SLAVE_PCIE_1 12 + +#define MASTER_GPU_TCU 0 +#define MASTER_SYS_TCU 1 +#define MASTER_UBWC_P_TCU 2 +#define MASTER_APPSS_PROC 3 +#define MASTER_GFX3D 4 +#define MASTER_LPASS_GEM_NOC 5 +#define MASTER_MSS_PROC 6 +#define MASTER_MNOC_HF_MEM_NOC 7 +#define MASTER_MNOC_SF_MEM_NOC 8 +#define MASTER_COMPUTE_NOC 9 +#define MASTER_ANOC_PCIE_GEM_NOC 10 +#define MASTER_SNOC_SF_MEM_NOC 11 +#define MASTER_UBWC_P 12 +#define MASTER_GIC 13 +#define SLAVE_GEM_NOC_CNOC 14 +#define SLAVE_LLCC 15 +#define SLAVE_MEM_NOC_PCIE_SNOC 16 + +#define MASTER_LPIAON_NOC 0 +#define SLAVE_LPASS_GEM_NOC 1 + +#define MASTER_LPASS_LPINOC 0 +#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1 + +#define MASTER_LPASS_PROC 0 +#define SLAVE_LPICX_NOC_LPIAON_NOC 1 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CAMNOC_HF 0 +#define MASTER_CAMNOC_ICP 1 +#define MASTER_CAMNOC_SF 2 +#define MASTER_MDP 3 +#define MASTER_CDSP_HCP 4 +#define MASTER_VIDEO 5 +#define MASTER_VIDEO_CV_PROC 6 +#define MASTER_VIDEO_PROC 7 +#define MASTER_VIDEO_V_PROC 8 +#define MASTER_CNOC_MNOC_CFG 9 +#define SLAVE_MNOC_HF_MEM_NOC 10 +#define SLAVE_MNOC_SF_MEM_NOC 11 +#define SLAVE_SERVICE_MNOC 12 + +#define MASTER_CDSP_PROC 0 
+#define SLAVE_CDSP_MEM_NOC 1 + +#define MASTER_PCIE_ANOC_CFG 0 +#define MASTER_PCIE_0 1 +#define MASTER_PCIE_1 2 +#define SLAVE_ANOC_PCIE_GEM_NOC 3 +#define SLAVE_SERVICE_PCIE_ANOC 4 + +#define MASTER_A1NOC_SNOC 0 +#define MASTER_A2NOC_SNOC 1 +#define SLAVE_SNOC_GEM_NOC_SF 2 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h new file mode 100644 index 000000000000..a38c3472698a --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H + +#define MASTER_QSPI_0 0 +#define MASTER_QUP_1 1 +#define MASTER_SDCC_4 2 +#define MASTER_UFS_MEM 3 +#define SLAVE_A1NOC_SNOC 4 + +#define MASTER_QUP_0 0 +#define MASTER_QUP_2 1 +#define MASTER_CRYPTO 2 +#define MASTER_SP 3 +#define MASTER_QDSS_ETR 4 +#define MASTER_QDSS_ETR_1 5 +#define MASTER_SDCC_2 6 +#define SLAVE_A2NOC_SNOC 7 + +#define MASTER_DDR_PERF_MODE 0 +#define MASTER_QUP_CORE_0 1 +#define MASTER_QUP_CORE_1 2 +#define MASTER_QUP_CORE_2 3 +#define SLAVE_DDR_PERF_MODE 4 +#define SLAVE_QUP_CORE_0 5 +#define SLAVE_QUP_CORE_1 6 +#define SLAVE_QUP_CORE_2 7 + +#define MASTER_CNOC_CFG 0 +#define SLAVE_AHB2PHY_SOUTH 1 +#define SLAVE_AHB2PHY_NORTH 2 +#define SLAVE_AHB2PHY_2 3 +#define SLAVE_AV1_ENC_CFG 4 +#define SLAVE_CAMERA_CFG 5 +#define SLAVE_CLK_CTL 6 +#define SLAVE_CRYPTO_0_CFG 7 +#define SLAVE_DISPLAY_CFG 8 +#define SLAVE_GFX3D_CFG 9 +#define SLAVE_IMEM_CFG 10 +#define SLAVE_IPC_ROUTER_CFG 11 +#define SLAVE_PCIE_0_CFG 12 +#define SLAVE_PCIE_1_CFG 13 +#define SLAVE_PCIE_2_CFG 14 +#define SLAVE_PCIE_3_CFG 15 +#define SLAVE_PCIE_4_CFG 16 +#define SLAVE_PCIE_5_CFG 17 +#define SLAVE_PCIE_6A_CFG 18 +#define SLAVE_PCIE_6B_CFG 19 +#define SLAVE_PCIE_RSC_CFG 20 +#define SLAVE_PDM 21 +#define SLAVE_PRNG 22 +#define SLAVE_QDSS_CFG 23 +#define SLAVE_QSPI_0 24 +#define SLAVE_QUP_0 25 +#define SLAVE_QUP_1 26 +#define SLAVE_QUP_2 27 +#define SLAVE_SDCC_2 28 +#define SLAVE_SDCC_4 29 +#define SLAVE_SMMUV3_CFG 30 +#define SLAVE_TCSR 31 +#define SLAVE_TLMM 32 +#define SLAVE_UFS_MEM_CFG 33 +#define SLAVE_USB2 34 +#define SLAVE_USB3_0 35 +#define SLAVE_USB3_1 36 +#define SLAVE_USB3_2 37 +#define SLAVE_USB3_MP 38 +#define SLAVE_USB4_0 39 +#define SLAVE_USB4_1 40 +#define SLAVE_USB4_2 41 +#define SLAVE_VENUS_CFG 42 +#define SLAVE_LPASS_QTB_CFG 43 +#define SLAVE_CNOC_MNOC_CFG 44 +#define SLAVE_NSP_QTB_CFG 45 +#define SLAVE_QDSS_STM 46 +#define SLAVE_TCU 47 + +#define MASTER_GEM_NOC_CNOC 0 +#define MASTER_GEM_NOC_PCIE_SNOC 1 +#define SLAVE_AOSS 2 +#define SLAVE_TME_CFG 3 +#define SLAVE_APPSS 4 +#define SLAVE_CNOC_CFG 5 +#define SLAVE_BOOT_IMEM 6 +#define SLAVE_IMEM 7 +#define SLAVE_PCIE_0 8 +#define SLAVE_PCIE_1 9 +#define SLAVE_PCIE_2 10 +#define SLAVE_PCIE_3 11 +#define SLAVE_PCIE_4 12 +#define SLAVE_PCIE_5 13 +#define SLAVE_PCIE_6A 14 +#define SLAVE_PCIE_6B 15 + +#define MASTER_GPU_TCU 0 +#define MASTER_PCIE_TCU 1 +#define MASTER_SYS_TCU 2 +#define MASTER_APPSS_PROC 3 +#define MASTER_GFX3D 4 +#define MASTER_LPASS_GEM_NOC 5 +#define MASTER_MNOC_HF_MEM_NOC 6 +#define MASTER_MNOC_SF_MEM_NOC 7 +#define MASTER_COMPUTE_NOC 8 +#define MASTER_ANOC_PCIE_GEM_NOC 9 +#define MASTER_SNOC_SF_MEM_NOC 10 +#define MASTER_GIC2 11 +#define SLAVE_GEM_NOC_CNOC 12 +#define SLAVE_LLCC 13 
+#define SLAVE_MEM_NOC_PCIE_SNOC 14 +#define MASTER_MNOC_HF_MEM_NOC_DISP 15 +#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16 +#define SLAVE_LLCC_DISP 17 +#define MASTER_ANOC_PCIE_GEM_NOC_PCIE 18 +#define SLAVE_LLCC_PCIE 19 + +#define MASTER_LPIAON_NOC 0 +#define SLAVE_LPASS_GEM_NOC 1 + +#define MASTER_LPASS_LPINOC 0 +#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1 + +#define MASTER_LPASS_PROC 0 +#define SLAVE_LPICX_NOC_LPIAON_NOC 1 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 +#define MASTER_LLCC_DISP 2 +#define SLAVE_EBI1_DISP 3 +#define MASTER_LLCC_PCIE 4 +#define SLAVE_EBI1_PCIE 5 + +#define MASTER_AV1_ENC 0 +#define MASTER_CAMNOC_HF 1 +#define MASTER_CAMNOC_ICP 2 +#define MASTER_CAMNOC_SF 3 +#define MASTER_EVA 4 +#define MASTER_MDP 5 +#define MASTER_VIDEO 6 +#define MASTER_VIDEO_CV_PROC 7 +#define MASTER_VIDEO_V_PROC 8 +#define MASTER_CNOC_MNOC_CFG 9 +#define SLAVE_MNOC_HF_MEM_NOC 10 +#define SLAVE_MNOC_SF_MEM_NOC 11 +#define SLAVE_SERVICE_MNOC 12 +#define MASTER_MDP_DISP 13 +#define SLAVE_MNOC_HF_MEM_NOC_DISP 14 + +#define MASTER_CDSP_PROC 0 +#define SLAVE_CDSP_MEM_NOC 1 + +#define MASTER_PCIE_NORTH 0 +#define MASTER_PCIE_SOUTH 1 +#define SLAVE_ANOC_PCIE_GEM_NOC 2 +#define MASTER_PCIE_NORTH_PCIE 3 +#define MASTER_PCIE_SOUTH_PCIE 4 +#define SLAVE_ANOC_PCIE_GEM_NOC_PCIE 5 + +#define MASTER_PCIE_3 0 +#define MASTER_PCIE_4 1 +#define MASTER_PCIE_5 2 +#define SLAVE_PCIE_NORTH 3 +#define MASTER_PCIE_3_PCIE 4 +#define MASTER_PCIE_4_PCIE 5 +#define MASTER_PCIE_5_PCIE 6 +#define SLAVE_PCIE_NORTH_PCIE 7 + +#define MASTER_PCIE_0 0 +#define MASTER_PCIE_1 1 +#define MASTER_PCIE_2 2 +#define MASTER_PCIE_6A 3 +#define MASTER_PCIE_6B 4 +#define SLAVE_PCIE_SOUTH 5 +#define MASTER_PCIE_0_PCIE 6 +#define MASTER_PCIE_1_PCIE 7 +#define MASTER_PCIE_2_PCIE 8 +#define MASTER_PCIE_6A_PCIE 9 +#define MASTER_PCIE_6B_PCIE 10 +#define SLAVE_PCIE_SOUTH_PCIE 11 + +#define MASTER_A1NOC_SNOC 0 +#define MASTER_A2NOC_SNOC 1 +#define MASTER_GIC1 2 +#define MASTER_USB_NOC_SNOC 3 +#define SLAVE_SNOC_GEM_NOC_SF 4 + +#define MASTER_AGGRE_USB_NORTH 0 +#define MASTER_AGGRE_USB_SOUTH 1 +#define SLAVE_USB_NOC_SNOC 2 + +#define MASTER_USB2 0 +#define MASTER_USB3_MP 1 +#define SLAVE_AGGRE_USB_NORTH 2 + +#define MASTER_USB3_0 0 +#define MASTER_USB3_1 1 +#define MASTER_USB3_2 2 +#define MASTER_USB4_0 3 +#define MASTER_USB4_1 4 +#define MASTER_USB4_2 5 +#define SLAVE_AGGRE_USB_SOUTH 6 + +#endif diff --git a/include/dt-bindings/power/amlogic,t7-pwrc.h b/include/dt-bindings/power/amlogic,t7-pwrc.h new file mode 100644 index 000000000000..1f1f2739cc26 --- /dev/null +++ b/include/dt-bindings/power/amlogic,t7-pwrc.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2023 Amlogic, Inc. 
+ * Author: Hongyu Chen <hongyu.chen1@amlogic.com> + */ +#ifndef _DT_BINDINGS_AMLOGIC_T7_POWER_H +#define _DT_BINDINGS_AMLOGIC_T7_POWER_H + +#define PWRC_T7_DSPA_ID 0 +#define PWRC_T7_DSPB_ID 1 +#define PWRC_T7_DOS_HCODEC_ID 2 +#define PWRC_T7_DOS_HEVC_ID 3 +#define PWRC_T7_DOS_VDEC_ID 4 +#define PWRC_T7_DOS_WAVE_ID 5 +#define PWRC_T7_VPU_HDMI_ID 6 +#define PWRC_T7_USB_COMB_ID 7 +#define PWRC_T7_PCIE_ID 8 +#define PWRC_T7_GE2D_ID 9 +#define PWRC_T7_SRAMA_ID 10 +#define PWRC_T7_SRAMB_ID 11 +#define PWRC_T7_HDMIRX_ID 12 +#define PWRC_T7_VI_CLK1_ID 13 +#define PWRC_T7_VI_CLK2_ID 14 +#define PWRC_T7_ETH_ID 15 +#define PWRC_T7_ISP_ID 16 +#define PWRC_T7_MIPI_ISP_ID 17 +#define PWRC_T7_GDC_ID 18 +#define PWRC_T7_CVE_ID 18 +#define PWRC_T7_DEWARP_ID 19 +#define PWRC_T7_SDIO_A_ID 20 +#define PWRC_T7_SDIO_B_ID 21 +#define PWRC_T7_EMMC_ID 22 +#define PWRC_T7_MALI_SC0_ID 23 +#define PWRC_T7_MALI_SC1_ID 24 +#define PWRC_T7_MALI_SC2_ID 25 +#define PWRC_T7_MALI_SC3_ID 26 +#define PWRC_T7_MALI_TOP_ID 27 +#define PWRC_T7_NNA_CORE0_ID 28 +#define PWRC_T7_NNA_CORE1_ID 29 +#define PWRC_T7_NNA_CORE2_ID 30 +#define PWRC_T7_NNA_CORE3_ID 31 +#define PWRC_T7_NNA_TOP_ID 32 +#define PWRC_T7_DDR0_ID 33 +#define PWRC_T7_DDR1_ID 34 +#define PWRC_T7_DMC0_ID 35 +#define PWRC_T7_DMC1_ID 36 +#define PWRC_T7_NOC_ID 37 +#define PWRC_T7_NIC2_ID 38 +#define PWRC_T7_NIC3_ID 39 +#define PWRC_T7_CCI_ID 40 +#define PWRC_T7_MIPI_DSI0_ID 41 +#define PWRC_T7_SPICC0_ID 42 +#define PWRC_T7_SPICC1_ID 43 +#define PWRC_T7_SPICC2_ID 44 +#define PWRC_T7_SPICC3_ID 45 +#define PWRC_T7_SPICC4_ID 46 +#define PWRC_T7_SPICC5_ID 47 +#define PWRC_T7_EDP0_ID 48 +#define PWRC_T7_EDP1_ID 49 +#define PWRC_T7_MIPI_DSI1_ID 50 +#define PWRC_T7_AUDIO_ID 51 + +#endif diff --git a/include/dt-bindings/power/mediatek,mt8365-power.h b/include/dt-bindings/power/mediatek,mt8365-power.h new file mode 100644 index 000000000000..e6cfd0ec7871 --- /dev/null +++ b/include/dt-bindings/power/mediatek,mt8365-power.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2022 MediaTek Inc. 
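Each PWRC_T7_*_ID above is the single cell a consumer hands to the T7 power controller. A minimal sketch of the usual power-domains wiring; the &pwrc label and the consumer node are illustrative assumptions, not taken from this patch:

#include <dt-bindings/power/amlogic,t7-pwrc.h>

ethmac: ethernet@fdc00000 {
	power-domains = <&pwrc PWRC_T7_ETH_ID>;
};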
+ */ + +#ifndef _DT_BINDINGS_POWER_MT8365_POWER_H +#define _DT_BINDINGS_POWER_MT8365_POWER_H + +#define MT8365_POWER_DOMAIN_MM 0 +#define MT8365_POWER_DOMAIN_CONN 1 +#define MT8365_POWER_DOMAIN_MFG 2 +#define MT8365_POWER_DOMAIN_AUDIO 3 +#define MT8365_POWER_DOMAIN_CAM 4 +#define MT8365_POWER_DOMAIN_DSP 5 +#define MT8365_POWER_DOMAIN_VDEC 6 +#define MT8365_POWER_DOMAIN_VENC 7 +#define MT8365_POWER_DOMAIN_APU 8 + +#endif /* _DT_BINDINGS_POWER_MT8365_POWER_H */ diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h index 44ec0c50e340..01fd0ac4dd08 100644 --- a/include/dt-bindings/power/meson-g12a-power.h +++ b/include/dt-bindings/power/meson-g12a-power.h @@ -10,5 +10,6 @@ #define PWRC_G12A_VPU_ID 0 #define PWRC_G12A_ETH_ID 1 #define PWRC_G12A_NNA_ID 2 +#define PWRC_G12A_ISP_ID 3 #endif diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h index 7c201a66bc69..e54ffa361451 100644 --- a/include/dt-bindings/power/qcom,rpmhpd.h +++ b/include/dt-bindings/power/qcom,rpmhpd.h @@ -26,5 +26,7 @@ #define RPMHPD_QPHY 16 #define RPMHPD_DDR 17 #define RPMHPD_XO 18 +#define RPMHPD_NSP2 19 +#define RPMHPD_GMXC 20 #endif diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index 83be996cb5eb..7f4e2983a4c5 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -278,6 +278,27 @@ #define MSM8909_VDDMX MSM8916_VDDMX #define MSM8909_VDDMX_AO MSM8916_VDDMX_AO +/* MSM8917 Power Domain Indexes */ +#define MSM8917_VDDCX 0 +#define MSM8917_VDDCX_AO 1 +#define MSM8917_VDDCX_VFL 2 +#define MSM8917_VDDMX 3 +#define MSM8917_VDDMX_AO 4 + +/* MSM8937 Power Domain Indexes */ +#define MSM8937_VDDCX MSM8917_VDDCX +#define MSM8937_VDDCX_AO MSM8917_VDDCX_AO +#define MSM8937_VDDCX_VFL MSM8917_VDDCX_VFL +#define MSM8937_VDDMX MSM8917_VDDMX +#define MSM8937_VDDMX_AO MSM8917_VDDMX_AO + +/* QM215 Power Domain Indexes */ +#define QM215_VDDCX MSM8917_VDDCX +#define QM215_VDDCX_AO MSM8917_VDDCX_AO +#define QM215_VDDCX_VFL MSM8917_VDDCX_VFL +#define QM215_VDDMX MSM8917_VDDMX +#define QM215_VDDMX_AO MSM8917_VDDMX_AO + /* MSM8953 Power Domain Indexes */ #define MSM8953_VDDMD 0 #define MSM8953_VDDMD_AO 1 diff --git a/include/dt-bindings/power/starfive,jh7110-pmu.h b/include/dt-bindings/power/starfive,jh7110-pmu.h index 132bfe401fc8..7b4f24927dee 100644 --- a/include/dt-bindings/power/starfive,jh7110-pmu.h +++ b/include/dt-bindings/power/starfive,jh7110-pmu.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ /* - * Copyright (C) 2022 StarFive Technology Co., Ltd. + * Copyright (C) 2022-2023 StarFive Technology Co., Ltd. * Author: Walker Chen <walker.chen@starfivetech.com> */ #ifndef __DT_BINDINGS_POWER_JH7110_POWER_H__ @@ -14,4 +14,8 @@ #define JH7110_PD_ISP 5 #define JH7110_PD_VENC 6 +/* AON Power Domain */ +#define JH7110_AON_PD_DPHY_TX 0 +#define JH7110_AON_PD_DPHY_RX 1 + #endif diff --git a/include/dt-bindings/reset/amlogic,c3-reset.h b/include/dt-bindings/reset/amlogic,c3-reset.h new file mode 100644 index 000000000000..d9127863f603 --- /dev/null +++ b/include/dt-bindings/reset/amlogic,c3-reset.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2023 Amlogic, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_AMLOGIC_C3_RESET_H +#define _DT_BINDINGS_AMLOGIC_C3_RESET_H + +/* RESET0 */ +/* 0-3 */ +#define RESET_USBCTRL 4 +/* 5-7 */ +#define RESET_USBPHY20 8 +/* 9 */ +#define RESET_USB2DRD 10 +#define RESET_MIPI_DSI_HOST 11 +#define RESET_MIPI_DSI_PHY 12 +/* 13-20 */ +#define RESET_GE2D 21 +#define RESET_DWAP 22 +/* 23-31 */ + +/* RESET1 */ +#define RESET_AUDIO 32 +/* 33-34 */ +#define RESET_DDRAPB 35 +#define RESET_DDR 36 +#define RESET_DOS_CAPB3 37 +#define RESET_DOS 38 +/* 39-46 */ +#define RESET_NNA 47 +#define RESET_ETHERNET 48 +#define RESET_ISP 49 +#define RESET_VC9000E_APB 50 +#define RESET_VC9000E_A 51 +/* 52 */ +#define RESET_VC9000E_CORE 53 +/* 54-63 */ + +/* RESET2 */ +#define RESET_ABUS_ARB 64 +#define RESET_IRCTRL 65 +/* 66 */ +#define RESET_TEMP_PII 67 +/* 68-72 */ +#define RESET_SPICC_0 73 +#define RESET_SPICC_1 74 +#define RESET_RSA 75 + +/* 76-79 */ +#define RESET_MSR_CLK 80 +#define RESET_SPIFC 81 +#define RESET_SAR_ADC 82 +/* 83-87 */ +#define RESET_ACODEC 88 +/* 89-90 */ +#define RESET_WATCHDOG 91 +/* 92-95 */ + +/* RESET3 */ +#define RESET_ISP_NIC_GPV 96 +#define RESET_ISP_NIC_MAIN 97 +#define RESET_ISP_NIC_VCLK 98 +#define RESET_ISP_NIC_VOUT 99 +#define RESET_ISP_NIC_ALL 100 +#define RESET_VOUT 101 +#define RESET_VOUT_VENC 102 +/* 103 */ +#define RESET_CVE_NIC_GPV 104 +#define RESET_CVE_NIC_MAIN 105 +#define RESET_CVE_NIC_GE2D 106 +#define RESET_CVE_NIC_DW 106 +#define RESET_CVE_NIC_CVE 108 +#define RESET_CVE_NIC_ALL 109 +#define RESET_CVE 110 +/* 112-127 */ + +/* RESET4 */ +#define RESET_RTC 128 +#define RESET_PWM_AB 129 +#define RESET_PWM_CD 130 +#define RESET_PWM_EF 131 +#define RESET_PWM_GH 132 +#define RESET_PWM_IJ 133 +#define RESET_PWM_KL 134 +#define RESET_PWM_MN 135 +/* 136-137 */ +#define RESET_UART_A 138 +#define RESET_UART_B 139 +#define RESET_UART_C 140 +#define RESET_UART_D 141 +#define RESET_UART_E 142 +#define RESET_UART_F 143 +#define RESET_I2C_S_A 144 +#define RESET_I2C_M_A 145 +#define RESET_I2C_M_B 146 +#define RESET_I2C_M_C 147 +#define RESET_I2C_M_D 148 +/* 149-151 */ +#define RESET_SD_EMMC_A 152 +#define RESET_SD_EMMC_B 153 +#define RESET_SD_EMMC_C 154 + +/* RESET5 */ +/* 160-172 */ +#define RESET_BRG_NIC_NNA 173 +#define RESET_BRG_MUX_NIC_MAIN 174 +#define RESET_BRG_AO_NIC_ALL 175 +/* 176-183 */ +#define RESET_BRG_NIC_VAPB 184 +#define RESET_BRG_NIC_SDIO_B 185 +#define RESET_BRG_NIC_SDIO_A 186 +#define RESET_BRG_NIC_EMMC 187 +#define RESET_BRG_NIC_DSU 188 +#define RESET_BRG_NIC_SYSCLK 189 +#define RESET_BRG_NIC_MAIN 190 +#define RESET_BRG_NIC_ALL 191 + +#endif diff --git a/include/dt-bindings/reset/mediatek,mt7988-resets.h b/include/dt-bindings/reset/mediatek,mt7988-resets.h new file mode 100644 index 000000000000..493301971367 --- /dev/null +++ b/include/dt-bindings/reset/mediatek,mt7988-resets.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org> + * Author: Daniel Golle <daniel@makrotopia.org> + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7988 +#define _DT_BINDINGS_RESET_CONTROLLER_MT7988 + +/* ETHWARP resets */ +#define MT7988_ETHWARP_RST_SWITCH 0 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7988 */ diff --git a/include/dt-bindings/reset/mt8188-resets.h b/include/dt-bindings/reset/mt8188-resets.h index ba9a5e9b8899..5a58c54e7d20 100644 --- a/include/dt-bindings/reset/mt8188-resets.h +++ b/include/dt-bindings/reset/mt8188-resets.h @@ -38,4 +38,79 @@ #define MT8188_INFRA_RST1_THERMAL_CTRL_RST 1 #define 
MT8188_INFRA_RST3_PTP_CTRL_RST 2 +#define MT8188_VDO0_RST_DISP_OVL0 0 +#define MT8188_VDO0_RST_FAKE_ENG0 1 +#define MT8188_VDO0_RST_DISP_CCORR0 2 +#define MT8188_VDO0_RST_DISP_MUTEX0 3 +#define MT8188_VDO0_RST_DISP_GAMMA0 4 +#define MT8188_VDO0_RST_DISP_DITHER0 5 +#define MT8188_VDO0_RST_DISP_WDMA0 6 +#define MT8188_VDO0_RST_DISP_RDMA0 7 +#define MT8188_VDO0_RST_DSI0 8 +#define MT8188_VDO0_RST_DSI1 9 +#define MT8188_VDO0_RST_DSC_WRAP0 10 +#define MT8188_VDO0_RST_VPP_MERGE0 11 +#define MT8188_VDO0_RST_DP_INTF0 12 +#define MT8188_VDO0_RST_DISP_AAL0 13 +#define MT8188_VDO0_RST_INLINEROT0 14 +#define MT8188_VDO0_RST_APB_BUS 15 +#define MT8188_VDO0_RST_DISP_COLOR0 16 +#define MT8188_VDO0_RST_MDP_WROT0 17 +#define MT8188_VDO0_RST_DISP_RSZ0 18 + +#define MT8188_VDO1_RST_SMI_LARB2 0 +#define MT8188_VDO1_RST_SMI_LARB3 1 +#define MT8188_VDO1_RST_GALS 2 +#define MT8188_VDO1_RST_FAKE_ENG0 3 +#define MT8188_VDO1_RST_FAKE_ENG1 4 +#define MT8188_VDO1_RST_MDP_RDMA0 5 +#define MT8188_VDO1_RST_MDP_RDMA1 6 +#define MT8188_VDO1_RST_MDP_RDMA2 7 +#define MT8188_VDO1_RST_MDP_RDMA3 8 +#define MT8188_VDO1_RST_VPP_MERGE0 9 +#define MT8188_VDO1_RST_VPP_MERGE1 10 +#define MT8188_VDO1_RST_VPP_MERGE2 11 +#define MT8188_VDO1_RST_VPP_MERGE3 12 +#define MT8188_VDO1_RST_VPP_MERGE4 13 +#define MT8188_VDO1_RST_VPP2_TO_VDO1_DL_ASYNC 14 +#define MT8188_VDO1_RST_VPP3_TO_VDO1_DL_ASYNC 15 +#define MT8188_VDO1_RST_DISP_MUTEX 16 +#define MT8188_VDO1_RST_MDP_RDMA4 17 +#define MT8188_VDO1_RST_MDP_RDMA5 18 +#define MT8188_VDO1_RST_MDP_RDMA6 19 +#define MT8188_VDO1_RST_MDP_RDMA7 20 +#define MT8188_VDO1_RST_DP_INTF1_MMCK 21 +#define MT8188_VDO1_RST_DPI0_MM_CK 22 +#define MT8188_VDO1_RST_DPI1_MM_CK 23 +#define MT8188_VDO1_RST_MERGE0_DL_ASYNC 24 +#define MT8188_VDO1_RST_MERGE1_DL_ASYNC 25 +#define MT8188_VDO1_RST_MERGE2_DL_ASYNC 26 +#define MT8188_VDO1_RST_MERGE3_DL_ASYNC 27 +#define MT8188_VDO1_RST_MERGE4_DL_ASYNC 28 +#define MT8188_VDO1_RST_VDO0_DSC_TO_VDO1_DL_ASYNC 29 +#define MT8188_VDO1_RST_VDO0_MERGE_TO_VDO1_DL_ASYNC 30 +#define MT8188_VDO1_RST_PADDING0 31 +#define MT8188_VDO1_RST_PADDING1 32 +#define MT8188_VDO1_RST_PADDING2 33 +#define MT8188_VDO1_RST_PADDING3 34 +#define MT8188_VDO1_RST_PADDING4 35 +#define MT8188_VDO1_RST_PADDING5 36 +#define MT8188_VDO1_RST_PADDING6 37 +#define MT8188_VDO1_RST_PADDING7 38 +#define MT8188_VDO1_RST_DISP_RSZ0 39 +#define MT8188_VDO1_RST_DISP_RSZ1 40 +#define MT8188_VDO1_RST_DISP_RSZ2 41 +#define MT8188_VDO1_RST_DISP_RSZ3 42 +#define MT8188_VDO1_RST_HDR_VDO_FE0 43 +#define MT8188_VDO1_RST_HDR_GFX_FE0 44 +#define MT8188_VDO1_RST_HDR_VDO_BE 45 +#define MT8188_VDO1_RST_HDR_VDO_FE1 46 +#define MT8188_VDO1_RST_HDR_GFX_FE1 47 +#define MT8188_VDO1_RST_DISP_MIXER 48 +#define MT8188_VDO1_RST_HDR_VDO_FE0_DL_ASYNC 49 +#define MT8188_VDO1_RST_HDR_VDO_FE1_DL_ASYNC 50 +#define MT8188_VDO1_RST_HDR_GFX_FE0_DL_ASYNC 51 +#define MT8188_VDO1_RST_HDR_GFX_FE1_DL_ASYNC 52 +#define MT8188_VDO1_RST_HDR_VDO_BE_DL_ASYNC 53 + #endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8188 */ diff --git a/include/dt-bindings/reset/qcom,sm8650-gpucc.h b/include/dt-bindings/reset/qcom,sm8650-gpucc.h new file mode 100644 index 000000000000..f021a6cccc66 --- /dev/null +++ b/include/dt-bindings/reset/qcom,sm8650-gpucc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
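The MT8188_VDO0_*/MT8188_VDO1_* indexes above select reset bits inside the two display (VDOSYS) syscons and are referenced through the provider's single reset cell. A hedged consumer sketch; the &vdosys0 label and the DSI node are assumptions for illustration:

#include <dt-bindings/reset/mt8188-resets.h>

dsi0: dsi@1c008000 {
	resets = <&vdosys0 MT8188_VDO0_RST_DSI0>;
};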
+ * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H +#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H + +#define GPUCC_GPU_CC_ACD_BCR 0 +#define GPUCC_GPU_CC_CX_BCR 1 +#define GPUCC_GPU_CC_FAST_HUB_BCR 2 +#define GPUCC_GPU_CC_FF_BCR 3 +#define GPUCC_GPU_CC_GFX3D_AON_BCR 4 +#define GPUCC_GPU_CC_GMU_BCR 5 +#define GPUCC_GPU_CC_GX_BCR 6 +#define GPUCC_GPU_CC_XO_BCR 7 +#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 8 + +#endif diff --git a/include/dt-bindings/reset/st,stm32mp25-rcc.h b/include/dt-bindings/reset/st,stm32mp25-rcc.h new file mode 100644 index 000000000000..d5615930bcc8 --- /dev/null +++ b/include/dt-bindings/reset/st,stm32mp25-rcc.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (C) STMicroelectronics 2023 - All Rights Reserved + * Author(s): Gabriel Fernandez <gabriel.fernandez@foss.st.com> + */ + +#ifndef _DT_BINDINGS_STM32MP25_RESET_H_ +#define _DT_BINDINGS_STM32MP25_RESET_H_ + +#define TIM1_R 0 +#define TIM2_R 1 +#define TIM3_R 2 +#define TIM4_R 3 +#define TIM5_R 4 +#define TIM6_R 5 +#define TIM7_R 6 +#define TIM8_R 7 +#define TIM10_R 8 +#define TIM11_R 9 +#define TIM12_R 10 +#define TIM13_R 11 +#define TIM14_R 12 +#define TIM15_R 13 +#define TIM16_R 14 +#define TIM17_R 15 +#define TIM20_R 16 +#define LPTIM1_R 17 +#define LPTIM2_R 18 +#define LPTIM3_R 19 +#define LPTIM4_R 20 +#define LPTIM5_R 21 +#define SPI1_R 22 +#define SPI2_R 23 +#define SPI3_R 24 +#define SPI4_R 25 +#define SPI5_R 26 +#define SPI6_R 27 +#define SPI7_R 28 +#define SPI8_R 29 +#define SPDIFRX_R 30 +#define USART1_R 31 +#define USART2_R 32 +#define USART3_R 33 +#define UART4_R 34 +#define UART5_R 35 +#define USART6_R 36 +#define UART7_R 37 +#define UART8_R 38 +#define UART9_R 39 +#define LPUART1_R 40 +#define IS2M_R 41 +#define I2C1_R 42 +#define I2C2_R 43 +#define I2C3_R 44 +#define I2C4_R 45 +#define I2C5_R 46 +#define I2C6_R 47 +#define I2C7_R 48 +#define I2C8_R 49 +#define SAI1_R 50 +#define SAI2_R 51 +#define SAI3_R 52 +#define SAI4_R 53 +#define MDF1_R 54 +#define MDF2_R 55 +#define FDCAN_R 56 +#define HDP_R 57 +#define ADC12_R 58 +#define ADC3_R 59 +#define ETH1_R 60 +#define ETH2_R 61 +#define USB2_R 62 +#define USB2PHY1_R 63 +#define USB2PHY2_R 64 +#define USB3DR_R 65 +#define USB3PCIEPHY_R 66 +#define USBTC_R 67 +#define ETHSW_R 68 +#define SDMMC1_R 69 +#define SDMMC1DLL_R 70 +#define SDMMC2_R 71 +#define SDMMC2DLL_R 72 +#define SDMMC3_R 73 +#define SDMMC3DLL_R 74 +#define GPU_R 75 +#define LTDC_R 76 +#define DSI_R 77 +#define LVDS_R 78 +#define CSI_R 79 +#define DCMIPP_R 80 +#define CCI_R 81 +#define VDEC_R 82 +#define VENC_R 83 +#define WWDG1_R 84 +#define WWDG2_R 85 +#define VREF_R 86 +#define DTS_R 87 +#define CRC_R 88 +#define SERC_R 89 +#define OSPIIOM_R 90 +#define I3C1_R 91 +#define I3C2_R 92 +#define I3C3_R 93 +#define I3C4_R 94 +#define IWDG2_KER_R 95 +#define IWDG4_KER_R 96 +#define RNG_R 97 +#define PKA_R 98 +#define SAES_R 99 +#define HASH_R 100 +#define CRYP1_R 101 +#define CRYP2_R 102 +#define PCIE_R 103 +#define OSPI1_R 104 +#define OSPI1DLL_R 105 +#define OSPI2_R 106 +#define OSPI2DLL_R 107 +#define FMC_R 108 +#define DBG_R 109 +#define GPIOA_R 110 +#define GPIOB_R 111 +#define GPIOC_R 112 +#define GPIOD_R 113 +#define GPIOE_R 114 +#define GPIOF_R 115 +#define GPIOG_R 116 +#define GPIOH_R 117 +#define GPIOI_R 118 +#define GPIOJ_R 119 +#define GPIOK_R 120 +#define GPIOZ_R 121 +#define HPDMA1_R 122 +#define HPDMA2_R 123 +#define HPDMA3_R 124 +#define LPDMA_R 125 +#define HSEM_R 126 +#define IPCC1_R 127 
+#define IPCC2_R 128 +#define C2_HOLDBOOT_R 129 +#define C1_HOLDBOOT_R 130 +#define C1_R 131 +#define C1P1POR_R 132 +#define C1P1_R 133 +#define C2_R 134 +#define C3_R 135 +#define SYS_R 136 +#define VSW_R 137 +#define C1MS_R 138 +#define DDRCP_R 139 +#define DDRCAPB_R 140 +#define DDRPHYCAPB_R 141 +#define DDRCFG_R 142 +#define DDR_R 143 + +#define STM32MP25_LAST_RESET 144 + +#define RST_SCMI_C1_R 0 +#define RST_SCMI_C2_R 1 +#define RST_SCMI_C1_HOLDBOOT_R 2 +#define RST_SCMI_C2_HOLDBOOT_R 3 +#define RST_SCMI_FMC 4 +#define RST_SCMI_OSPI1 5 +#define RST_SCMI_OSPI1DLL 6 +#define RST_SCMI_OSPI2 7 +#define RST_SCMI_OSPI2DLL 8 + +#endif /* _DT_BINDINGS_STM32MP25_RESET_H_ */ diff --git a/include/dt-bindings/soc/rockchip,vop2.h b/include/dt-bindings/soc/rockchip,vop2.h index 6e66a802b96a..668f199df9f0 100644 --- a/include/dt-bindings/soc/rockchip,vop2.h +++ b/include/dt-bindings/soc/rockchip,vop2.h @@ -10,5 +10,9 @@ #define ROCKCHIP_VOP2_EP_LVDS0 5 #define ROCKCHIP_VOP2_EP_MIPI1 6 #define ROCKCHIP_VOP2_EP_LVDS1 7 +#define ROCKCHIP_VOP2_EP_HDMI1 8 +#define ROCKCHIP_VOP2_EP_EDP1 9 +#define ROCKCHIP_VOP2_EP_DP0 10 +#define ROCKCHIP_VOP2_EP_DP1 11 #endif /* __DT_BINDINGS_ROCKCHIP_VOP2_H */ diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h index 8fa5a46675c4..997e2f55128a 100644 --- a/include/dt-bindings/thermal/mediatek,lvts-thermal.h +++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h @@ -7,6 +7,15 @@ #ifndef __MEDIATEK_LVTS_DT_H #define __MEDIATEK_LVTS_DT_H +#define MT7988_CPU_0 0 +#define MT7988_CPU_1 1 +#define MT7988_ETH2P5G_0 2 +#define MT7988_ETH2P5G_1 3 +#define MT7988_TOPS_0 4 +#define MT7988_TOPS_1 5 +#define MT7988_ETHWARP_0 6 +#define MT7988_ETHWARP_1 7 + #define MT8195_MCU_BIG_CPU0 0 #define MT8195_MCU_BIG_CPU1 1 #define MT8195_MCU_BIG_CPU2 2 @@ -26,4 +35,23 @@ #define MT8195_AP_CAM0 15 #define MT8195_AP_CAM1 16 +#define MT8192_MCU_BIG_CPU0 0 +#define MT8192_MCU_BIG_CPU1 1 +#define MT8192_MCU_BIG_CPU2 2 +#define MT8192_MCU_BIG_CPU3 3 +#define MT8192_MCU_LITTLE_CPU0 4 +#define MT8192_MCU_LITTLE_CPU1 5 +#define MT8192_MCU_LITTLE_CPU2 6 +#define MT8192_MCU_LITTLE_CPU3 7 + +#define MT8192_AP_VPU0 8 +#define MT8192_AP_VPU1 9 +#define MT8192_AP_GPU0 10 +#define MT8192_AP_GPU1 11 +#define MT8192_AP_INFRA 12 +#define MT8192_AP_CAM 13 +#define MT8192_AP_MD0 14 +#define MT8192_AP_MD1 15 +#define MT8192_AP_MD2 16 + #endif /* __MEDIATEK_LVTS_DT_H */ diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h new file mode 100644 index 000000000000..7ae6d84b2bd9 --- /dev/null +++ b/include/dt-bindings/watchdog/aspeed-wdt.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef DT_BINDINGS_ASPEED_WDT_H +#define DT_BINDINGS_ASPEED_WDT_H + +#define AST2500_WDT_RESET_CPU (1 << 0) +#define AST2500_WDT_RESET_COPROC (1 << 1) +#define AST2500_WDT_RESET_SDRAM (1 << 2) +#define AST2500_WDT_RESET_AHB (1 << 3) +#define AST2500_WDT_RESET_I2C (1 << 4) +#define AST2500_WDT_RESET_MAC0 (1 << 5) +#define AST2500_WDT_RESET_MAC1 (1 << 6) +#define AST2500_WDT_RESET_GRAPHICS (1 << 7) +#define AST2500_WDT_RESET_USB2_HOST_HUB (1 << 8) +#define AST2500_WDT_RESET_USB_HOST (1 << 9) +#define AST2500_WDT_RESET_HID_EHCI (1 << 10) +#define AST2500_WDT_RESET_VIDEO (1 << 11) +#define AST2500_WDT_RESET_HAC (1 << 12) +#define AST2500_WDT_RESET_LPC (1 << 13) +#define AST2500_WDT_RESET_SDIO (1 << 14) +#define AST2500_WDT_RESET_MIC (1 << 15) +#define AST2500_WDT_RESET_CRT (1 << 16) 
+#define AST2500_WDT_RESET_PWM (1 << 17) +#define AST2500_WDT_RESET_PECI (1 << 18) +#define AST2500_WDT_RESET_JTAG (1 << 19) +#define AST2500_WDT_RESET_ADC (1 << 20) +#define AST2500_WDT_RESET_GPIO (1 << 21) +#define AST2500_WDT_RESET_MCTP (1 << 22) +#define AST2500_WDT_RESET_XDMA (1 << 23) +#define AST2500_WDT_RESET_SPI (1 << 24) +#define AST2500_WDT_RESET_SOC_MISC (1 << 25) + +#define AST2500_WDT_RESET_DEFAULT 0x023ffff3 + +#define AST2600_WDT_RESET1_CPU (1 << 0) +#define AST2600_WDT_RESET1_SDRAM (1 << 1) +#define AST2600_WDT_RESET1_AHB (1 << 2) +#define AST2600_WDT_RESET1_SLI (1 << 3) +#define AST2600_WDT_RESET1_SOC_MISC0 (1 << 4) +#define AST2600_WDT_RESET1_COPROC (1 << 5) +#define AST2600_WDT_RESET1_USB_A (1 << 6) +#define AST2600_WDT_RESET1_USB_B (1 << 7) +#define AST2600_WDT_RESET1_UHCI (1 << 8) +#define AST2600_WDT_RESET1_GRAPHICS (1 << 9) +#define AST2600_WDT_RESET1_CRT (1 << 10) +#define AST2600_WDT_RESET1_VIDEO (1 << 11) +#define AST2600_WDT_RESET1_HAC (1 << 12) +#define AST2600_WDT_RESET1_DP (1 << 13) +#define AST2600_WDT_RESET1_DP_MCU (1 << 14) +#define AST2600_WDT_RESET1_GP_MCU (1 << 15) +#define AST2600_WDT_RESET1_MAC0 (1 << 16) +#define AST2600_WDT_RESET1_MAC1 (1 << 17) +#define AST2600_WDT_RESET1_SDIO0 (1 << 18) +#define AST2600_WDT_RESET1_JTAG0 (1 << 19) +#define AST2600_WDT_RESET1_MCTP0 (1 << 20) +#define AST2600_WDT_RESET1_MCTP1 (1 << 21) +#define AST2600_WDT_RESET1_XDMA0 (1 << 22) +#define AST2600_WDT_RESET1_XDMA1 (1 << 23) +#define AST2600_WDT_RESET1_GPIO0 (1 << 24) +#define AST2600_WDT_RESET1_RVAS (1 << 25) + +#define AST2600_WDT_RESET1_DEFAULT 0x030f1ff1 + +#define AST2600_WDT_RESET2_CPU (1 << 0) +#define AST2600_WDT_RESET2_SPI (1 << 1) +#define AST2600_WDT_RESET2_AHB2 (1 << 2) +#define AST2600_WDT_RESET2_SLI2 (1 << 3) +#define AST2600_WDT_RESET2_SOC_MISC1 (1 << 4) +#define AST2600_WDT_RESET2_MAC2 (1 << 5) +#define AST2600_WDT_RESET2_MAC3 (1 << 6) +#define AST2600_WDT_RESET2_SDIO1 (1 << 7) +#define AST2600_WDT_RESET2_JTAG1 (1 << 8) +#define AST2600_WDT_RESET2_GPIO1 (1 << 9) +#define AST2600_WDT_RESET2_MDIO (1 << 10) +#define AST2600_WDT_RESET2_LPC (1 << 11) +#define AST2600_WDT_RESET2_PECI (1 << 12) +#define AST2600_WDT_RESET2_PWM (1 << 13) +#define AST2600_WDT_RESET2_ADC (1 << 14) +#define AST2600_WDT_RESET2_FSI (1 << 15) +#define AST2600_WDT_RESET2_I2C (1 << 16) +#define AST2600_WDT_RESET2_I3C_GLOBAL (1 << 17) +#define AST2600_WDT_RESET2_I3C0 (1 << 18) +#define AST2600_WDT_RESET2_I3C1 (1 << 19) +#define AST2600_WDT_RESET2_I3C2 (1 << 20) +#define AST2600_WDT_RESET2_I3C3 (1 << 21) +#define AST2600_WDT_RESET2_I3C4 (1 << 22) +#define AST2600_WDT_RESET2_I3C5 (1 << 23) +#define AST2600_WDT_RESET2_ESPI (1 << 26) + +#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1 + +#endif diff --git a/include/kunit/device.h b/include/kunit/device.h new file mode 100644 index 000000000000..2450110ad64e --- /dev/null +++ b/include/kunit/device.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * KUnit basic device implementation + * + * Helpers for creating and managing fake devices for KUnit tests. + * + * Copyright (C) 2023, Google LLC. + * Author: David Gow <davidgow@google.com> + */ + +#ifndef _KUNIT_DEVICE_H +#define _KUNIT_DEVICE_H + +#if IS_ENABLED(CONFIG_KUNIT) + +#include <kunit/test.h> + +struct device; +struct device_driver; + +/** + * kunit_driver_create() - Create a struct device_driver attached to the kunit_bus + * @test: The test context object. + * @name: The name to give the created driver. 
+ * + * Creates a struct device_driver attached to the kunit_bus, with the name @name. + * This driver will automatically be cleaned up on test exit. + * + * Return: a stub struct device_driver, managed by KUnit, with the name @name. + */ +struct device_driver *kunit_driver_create(struct kunit *test, const char *name); + +/** + * kunit_device_register() - Create a struct device for use in KUnit tests + * @test: The test context object. + * @name: The name to give the created device. + * + * Creates a struct kunit_device (which is a struct device) with the given name, + * and a corresponding driver. The device and driver will be cleaned up on test + * exit, or when kunit_device_unregister() is called. See also + * kunit_device_register_with_driver(), if you wish to provide your own + * struct device_driver. + * + * Return: a pointer to a struct device which will be cleaned up when the test + * exits, or an error pointer if the device could not be allocated or registered. + */ +struct device *kunit_device_register(struct kunit *test, const char *name); + +/** + * kunit_device_register_with_driver() - Create a struct device for use in KUnit tests + * @test: The test context object. + * @name: The name to give the created device. + * @drv: The struct device_driver to associate with the device. + * + * Creates a struct kunit_device (which is a struct device) with the given + * name, and driver. The device will be cleaned up on test exit, or when + * kunit_device_unregister() is called. See also kunit_device_register(), if you + * wish KUnit to create and manage a driver for you. + * + * Return: a pointer to a struct device which will be cleaned up when the test + * exits, or an error pointer if the device could not be allocated or registered. + */ +struct device *kunit_device_register_with_driver(struct kunit *test, + const char *name, + const struct device_driver *drv); + +/** + * kunit_device_unregister() - Unregister a KUnit-managed device + * @test: The test context object which created the device + * @dev: The device. + * + * Unregisters and destroys a struct device which was created with + * kunit_device_register() or kunit_device_register_with_driver(). If KUnit + * created a driver, cleans it up as well. + */ +void kunit_device_unregister(struct kunit *test, struct device *dev); + +#endif + +#endif diff --git a/include/kunit/resource.h b/include/kunit/resource.h index c7383e90f5c9..4ad69a2642a5 100644 --- a/include/kunit/resource.h +++ b/include/kunit/resource.h @@ -391,6 +391,27 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res); typedef void (kunit_action_t)(void *); /** + * KUNIT_DEFINE_ACTION_WRAPPER() - Wrap a function for use as a deferred action. + * + * @wrapper: The name of the new wrapper function to define. + * @orig: The original function to wrap. + * @arg_type: The type of the argument accepted by @orig. + * + * Defines a wrapper for a function which accepts a single, pointer-sized + * argument. This wrapper can then be passed to kunit_add_action() and + * similar. This should be used in preference to casting a function + * directly to kunit_action_t, as casting function pointers will break + * control flow integrity (CFI), leading to crashes. + */ +#define KUNIT_DEFINE_ACTION_WRAPPER(wrapper, orig, arg_type) \ + static void wrapper(void *in) \ + { \ + arg_type arg = (arg_type)in; \ + orig(arg); \ + } + + +/** * kunit_add_action() - Call a function when the test ends. * @test: Test case to associate the action with.
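To make the CFI rationale above concrete, here is a sketch of how a test might define and register a deferred action with KUNIT_DEFINE_ACTION_WRAPPER() instead of casting kfree() to kunit_action_t; the test function itself is an assumption for illustration:

#include <kunit/resource.h>
#include <kunit/test.h>
#include <linux/slab.h>

/* Expands to 'static void kfree_wrapper(void *in)', which calls kfree()
 * through a correctly typed pointer, so indirect-call checking sees a
 * matching prototype. */
KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

static void example_deferred_free_test(struct kunit *test)
{
	void *buf = kmalloc(16, GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, buf);
	/* buf is freed via kfree_wrapper() when the test exits. */
	KUNIT_ASSERT_EQ(test, 0,
			kunit_add_action_or_reset(test, kfree_wrapper, buf));
}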
* @action: The function to run on test exit diff --git a/include/kunit/skbuff.h b/include/kunit/skbuff.h new file mode 100644 index 000000000000..44d12370939a --- /dev/null +++ b/include/kunit/skbuff.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * KUnit resource management helpers for SKBs (skbuff). + * + * Copyright (C) 2023 Intel Corporation + */ + +#ifndef _KUNIT_SKBUFF_H +#define _KUNIT_SKBUFF_H + +#include <kunit/resource.h> +#include <linux/skbuff.h> + +static void kunit_action_kfree_skb(void *p) +{ + kfree_skb((struct sk_buff *)p); +} + +/** + * kunit_zalloc_skb() - Allocate and initialize a resource managed skb. + * @test: The test case to which the skb belongs + * @len: size to allocate + * + * Allocate a new struct sk_buff using the @gfp flags, zero fill the given length + * and add it as a resource to the kunit test for automatic cleanup. + * + * Returns: newly allocated SKB, or %NULL on error + */ +static inline struct sk_buff *kunit_zalloc_skb(struct kunit *test, int len, + gfp_t gfp) +{ + struct sk_buff *res = alloc_skb(len, gfp); + + if (!res || skb_pad(res, len)) + return NULL; + + if (kunit_add_action_or_reset(test, kunit_action_kfree_skb, res)) + return NULL; + + return res; +} + +/** + * kunit_kfree_skb() - Like kfree_skb except for allocations managed by KUnit. + * @test: The test case to which the resource belongs. + * @skb: The SKB to free. + */ +static inline void kunit_kfree_skb(struct kunit *test, struct sk_buff *skb) +{ + if (!skb) + return; + + kunit_release_action(test, kunit_action_kfree_skb, (void *)skb); +} + +#endif /* _KUNIT_SKBUFF_H */ diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h index 85315c80b303..bf940322dfc0 100644 --- a/include/kunit/static_stub.h +++ b/include/kunit/static_stub.h @@ -93,7 +93,7 @@ void __kunit_activate_static_stub(struct kunit *test, * The redirection can be disabled again with kunit_deactivate_static_stub(). */ #define kunit_activate_static_stub(test, real_fn_addr, replacement_addr) do { \ - typecheck_fn(typeof(&real_fn_addr), replacement_addr); \ + typecheck_fn(typeof(&replacement_addr), real_fn_addr); \ __kunit_activate_static_stub(test, real_fn_addr, replacement_addr); \ } while (0) diff --git a/include/kunit/test.h b/include/kunit/test.h index 68ff01aee244..fcb4a4940ace 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -33,9 +33,7 @@ DECLARE_STATIC_KEY_FALSE(kunit_running); struct kunit; - -/* Size of log associated with test. */ -#define KUNIT_LOG_SIZE 2048 +struct string_stream; /* Maximum size of parameter description string. */ #define KUNIT_PARAM_DESC_SIZE 128 @@ -133,7 +131,7 @@ struct kunit_case { /* private: internal use only. */ enum kunit_status status; char *module_name; - char *log; + struct string_stream *log; }; static inline char *kunit_status_to_ok_not_ok(enum kunit_status status) @@ -253,8 +251,9 @@ struct kunit_suite { /* private: internal use only */ char status_comment[KUNIT_STATUS_COMMENT_SIZE]; struct dentry *debugfs; - char *log; + struct string_stream *log; int suite_init_err; + bool is_init; }; /* Stores an array of suites, end points one past the end */ @@ -279,7 +278,7 @@ struct kunit { /* private: internal use only. */ const char *name; /* Read only after initialization! */ - char *log; /* Points at case log after initialization */ + struct string_stream *log; /* Points at case log after initialization */ struct kunit_try_catch try_catch; /* param_value is the current parameter value for a test case.
*/ const void *param_value; @@ -315,7 +314,7 @@ const char *kunit_filter_glob(void); char *kunit_filter(void); char *kunit_filter_action(void); -void kunit_init_test(struct kunit *test, const char *name, char *log); +void kunit_init_test(struct kunit *test, const char *name, struct string_stream *log); int kunit_run_tests(struct kunit_suite *suite); @@ -339,6 +338,9 @@ void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites); void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin); void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr); +struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set, + struct kunit_suite_set suite_set); + #if IS_BUILTIN(CONFIG_KUNIT) int kunit_run_all_tests(void); #else @@ -373,6 +375,11 @@ static inline int kunit_run_all_tests(void) #define kunit_test_suite(suite) kunit_test_suites(&suite) +#define __kunit_init_test_suites(unique_array, ...) \ + static struct kunit_suite *unique_array[] \ + __aligned(sizeof(struct kunit_suite *)) \ + __used __section(".kunit_init_test_suites") = { __VA_ARGS__ } + /** * kunit_test_init_section_suites() - used to register one or more &struct * kunit_suite containing init functions or @@ -380,21 +387,21 @@ static inline int kunit_run_all_tests(void) * * @__suites: a statically allocated list of &struct kunit_suite. * - * This functions identically as kunit_test_suites() except that it suppresses - * modpost warnings for referencing functions marked __init or data marked - * __initdata; this is OK because currently KUnit only runs tests upon boot - * during the init phase or upon loading a module during the init phase. + * This functions similarly to kunit_test_suites() except that it compiles the + * list of suites during the init phase. + * + * This macro also suffixes the array and suite declarations it makes with + * _probe; so that modpost suppresses warnings about referencing init data + * for symbols named in this manner. - * NOTE TO KUNIT DEVS: If we ever allow KUnit tests to be run after boot, these - * tests must be excluded. + * Note: these init tests are not able to be run after boot so there is no + * "run" debugfs file generated for these tests. - * The only thing this macro does that's different from kunit_test_suites is - * that it suffixes the array and suite declarations it makes with _probe; - * modpost suppresses warnings about referencing init data for symbols named in - * this manner. + * Also, do not mark the suite or test case structs with __initdata because + * they will be used after the init phase with debugfs. */ #define kunit_test_init_section_suites(__suites...) 
\ - __kunit_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \ + __kunit_init_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \ ##__suites) #define kunit_test_init_section_suite(suite) \ @@ -473,7 +480,7 @@ static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp void kunit_cleanup(struct kunit *test); -void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...); +void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, ...); /** * kunit_mark_skipped() - Marks @test_or_suite as skipped @@ -751,7 +758,7 @@ do { \ .right_text = #right, \ }; \ \ - if (likely(strcmp(__left, __right) op 0)) \ + if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \ break; \ \ \ @@ -1516,6 +1523,25 @@ do { \ return NULL; \ } +/** + * KUNIT_ARRAY_PARAM_DESC() - Define test parameter generator from an array. + * @name: prefix for the test parameter generator function. + * @array: array of test parameters. + * @desc_member: structure member from array element to use as description + * + * Define function @name_gen_params which uses @array to generate parameters. + */ +#define KUNIT_ARRAY_PARAM_DESC(name, array, desc_member) \ + static const void *name##_gen_params(const void *prev, char *desc) \ + { \ + typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \ + if (__next - (array) < ARRAY_SIZE((array))) { \ + strscpy(desc, __next->desc_member, KUNIT_PARAM_DESC_SIZE); \ + return __next; \ + } \ + return NULL; \ + } + // TODO(dlatypov@google.com): consider eventually migrating users to explicitly // include resource.h themselves if they need it. #include <kunit/resource.h> diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index bb3cb005873e..c819c5d16613 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -82,6 +82,8 @@ struct timer_map { struct arch_timer_context *emul_ptimer; }; +void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map); + struct arch_timer_cpu { struct arch_timer_context timers[NR_KVM_TIMERS]; @@ -94,7 +96,7 @@ struct arch_timer_cpu { int __init kvm_timer_hyp_init(bool has_gic); int kvm_timer_enable(struct kvm_vcpu *vcpu); -int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); void kvm_timer_sync_user(struct kvm_vcpu *vcpu); bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); @@ -145,4 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt); void kvm_timer_cpu_up(void); void kvm_timer_cpu_down(void); +static inline bool has_cntpoff(void) +{ + return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)); +} + #endif diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 847da6fc2713..4b9d8fb393a8 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -12,8 +12,7 @@ #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) -#ifdef CONFIG_HW_PERF_EVENTS - +#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM) struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; @@ -63,6 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); +void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu); int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, 
@@ -74,9 +74,10 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu); struct kvm_pmu_events *kvm_get_pmu_events(void); void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu); void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); +void kvm_vcpu_pmu_resync_el0(void); #define kvm_vcpu_has_pmu(vcpu) \ - (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) + (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3)) /* * Updates the vcpu's view of the pmu events for this cpu. @@ -100,7 +101,11 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); }) u8 kvm_arm_pmu_get_pmuver_limit(void); +u64 kvm_pmu_evtyper_mask(struct kvm *kvm); +int kvm_arm_set_default_pmu(struct kvm *kvm); +u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm); +u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu); #else struct kvm_pmu { }; @@ -167,10 +172,31 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {} static inline u8 kvm_arm_pmu_get_pmuver_limit(void) { return 0; } +static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm) +{ + return 0; +} +static inline void kvm_vcpu_pmu_resync_el0(void) {} + +static inline int kvm_arm_set_default_pmu(struct kvm *kvm) +{ + return -ENODEV; +} + +static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) +{ + return 0; +} + +static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) +{ + return 0; +} #endif diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 6e55b9283789..e8fb624013d1 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -26,7 +26,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu) * revisions. It is thus safe to return the latest, unless * userspace has instructed us otherwise. 
*/ - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) { + if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) { if (vcpu->kvm->arch.psci_version) return vcpu->kvm->arch.psci_version; diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 5b27f94d4fad..8cc38e836f54 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -375,8 +375,8 @@ int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); void kvm_vgic_init_cpu_hardware(void); -int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, - bool level, void *owner); +int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, + unsigned int intid, bool level, void *owner); int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid, struct irq_ops *ops); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index a73246c3c35e..b7165e52b3c6 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -15,6 +15,7 @@ #include <linux/mod_devicetable.h> #include <linux/property.h> #include <linux/uuid.h> +#include <linux/node.h> struct irq_domain; struct irq_domain_ops; @@ -30,6 +31,7 @@ struct irq_domain_ops; #include <linux/dynamic_debug.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/fw_table.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> @@ -37,6 +39,16 @@ struct irq_domain_ops; #include <acpi/acpi_io.h> #include <asm/acpi.h> +#ifdef CONFIG_ACPI_TABLE_LIB +#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI) +#define __init_or_acpilib +#define __initdata_or_acpilib +#else +#define EXPORT_SYMBOL_ACPI_LIB(x) +#define __init_or_acpilib __init +#define __initdata_or_acpilib __initdata +#endif + static inline acpi_handle acpi_device_handle(struct acpi_device *adev) { return adev ? 
adev->handle : NULL; @@ -119,21 +131,8 @@ enum acpi_address_range_id { /* Table Handlers */ -union acpi_subtable_headers { - struct acpi_subtable_header common; - struct acpi_hmat_structure hmat; - struct acpi_prmt_module_header prmt; - struct acpi_cedt_header cedt; -}; - typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); -typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header, - const unsigned long end); - -typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header, - void *arg, const unsigned long end); - /* Debugger support */ struct acpi_debugger_ops { @@ -207,14 +206,6 @@ static inline int acpi_debugger_notify_command_complete(void) (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) -struct acpi_subtable_proc { - int id; - acpi_tbl_entry_handler handler; - acpi_tbl_entry_handler_arg handler_arg; - void *arg; - int count; -}; - void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); void __acpi_unmap_table(void __iomem *map, unsigned long size); int early_acpi_boot_init(void); @@ -229,16 +220,6 @@ void acpi_reserve_initial_tables (void); void acpi_table_init_complete (void); int acpi_table_init (void); -#ifdef CONFIG_ACPI_TABLE_LIB -#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI) -#define __init_or_acpilib -#define __initdata_or_acpilib -#else -#define EXPORT_SYMBOL_ACPI_LIB(x) -#define __init_or_acpilib __init -#define __initdata_or_acpilib __initdata -#endif - int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init_or_acpilib acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, @@ -256,10 +237,15 @@ acpi_table_parse_cedt(enum acpi_cedt_type id, int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); +static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc) +{ + return gicc->flags & ACPI_MADT_ENABLED; +} + /* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); -#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void @@ -439,6 +425,23 @@ extern int acpi_blacklisted(void); extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); +#ifdef CONFIG_ACPI_THERMAL_LIB +int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp); +int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp); +int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp); +int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp); +#endif + +#ifdef CONFIG_ACPI_HMAT +int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord); +#else +static inline int acpi_get_genport_coordinates(u32 uid, + struct access_coordinate *coord) +{ + return -EOPNOTSUPP; +} +#endif + #ifdef CONFIG_ACPI_NUMA int acpi_map_pxm_to_node(int pxm); int acpi_get_node(acpi_handle handle); @@ -771,6 +774,10 @@ const char *acpi_get_subsystem_id(acpi_handle handle); #define ACPI_HANDLE(dev) (NULL) #define ACPI_HANDLE_FWNODE(fwnode) (NULL) +/* Get rid of the -Wunused-variable for adev */ +#define acpi_dev_uid_match(adev, uid2) (adev && false) +#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false) + #include 
<acpi/acpi_numa.h> struct fwnode_handle; @@ -787,12 +794,6 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) struct acpi_device; -static inline bool -acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2) -{ - return false; -} - static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer) { return -ENODEV; @@ -1114,15 +1115,8 @@ static inline int acpi_get_lps0_constraint(struct device *dev) return ACPI_STATE_UNKNOWN; } #endif /* CONFIG_SUSPEND && CONFIG_X86 */ -#ifndef CONFIG_IA64 void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else -static inline void arch_reserve_mem_area(acpi_physical_address addr, - size_t size) -{ -} -#endif /* CONFIG_X86 */ -#else #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) #endif @@ -1480,6 +1474,15 @@ static inline int lpit_read_residency_count_address(u64 *address) } #endif +#ifdef CONFIG_ACPI_PROCESSOR_IDLE +#ifndef arch_get_idle_state_flags +static inline unsigned int arch_get_idle_state_flags(u32 arch_flags) +{ + return 0; +} +#endif +#endif /* CONFIG_ACPI_PROCESSOR_IDLE */ + #ifdef CONFIG_ACPI_PPTT int acpi_pptt_cpu_is_thread(unsigned int cpu); int find_acpi_cpu_topology(unsigned int cpu, int level); @@ -1539,4 +1542,9 @@ static inline void acpi_device_notify(struct device *dev) { } static inline void acpi_device_notify_remove(struct device *dev) { } #endif +static inline void acpi_use_parent_companion(struct device *dev) +{ + ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent)); +} + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/acpi_amd_wbrf.h b/include/linux/acpi_amd_wbrf.h new file mode 100644 index 000000000000..898f31d536d4 --- /dev/null +++ b/include/linux/acpi_amd_wbrf.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Wifi Band Exclusion Interface (AMD ACPI Implementation) + * Copyright (C) 2023 Advanced Micro Devices + */ + +#ifndef _ACPI_AMD_WBRF_H +#define _ACPI_AMD_WBRF_H + +#include <linux/device.h> +#include <linux/notifier.h> + +/* The maximum number of frequency band ranges */ +#define MAX_NUM_OF_WBRF_RANGES 11 + +/* Record actions */ +#define WBRF_RECORD_ADD 0x0 +#define WBRF_RECORD_REMOVE 0x1 + +/** + * struct freq_band_range - Wifi frequency band range definition + * @start: start frequency point (in Hz) + * @end: end frequency point (in Hz) + */ +struct freq_band_range { + u64 start; + u64 end; +}; + +/** + * struct wbrf_ranges_in_out - wbrf ranges info + * @num_of_ranges: total number of band ranges in this struct + * @band_list: array of Wifi band ranges + */ +struct wbrf_ranges_in_out { + u64 num_of_ranges; + struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES]; +}; + +/** + * enum wbrf_notifier_actions - wbrf notifier actions index + * @WBRF_CHANGED: there was some frequency band updates. The consumers + * should retrieve the latest active frequency bands. 
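For example, a consumer driver is expected to re-query the active ranges from its notifier callback when WBRF_CHANGED fires. A minimal sketch under that assumption; the my_wifi_* names and the device-pointer plumbing are illustrative, not part of this patch:

#include <linux/acpi_amd_wbrf.h>
#include <linux/notifier.h>

static struct device *my_wifi_dev;      /* hypothetical consumer device */

static int my_wifi_wbrf_notify(struct notifier_block *nb,
                               unsigned long action, void *data)
{
        struct wbrf_ranges_in_out out = {};

        if (action != WBRF_CHANGED)
                return NOTIFY_DONE;

        /* returns 0 on success; out.num_of_ranges entries are then valid */
        if (!amd_wbrf_retrieve_freq_band(my_wifi_dev, &out))
                my_wifi_apply_exclusions(&out); /* hypothetical consumer hook */

        return NOTIFY_OK;
}

static struct notifier_block my_wifi_wbrf_nb = {
        .notifier_call = my_wifi_wbrf_notify,
};

static int my_wifi_wbrf_init(struct device *dev)
{
        if (!acpi_amd_wbrf_supported_consumer(dev))
                return -ENODEV;

        my_wifi_dev = dev;
        return amd_wbrf_register_notifier(&my_wifi_wbrf_nb);
}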
+ */ +enum wbrf_notifier_actions { + WBRF_CHANGED, +}; + +#if IS_ENABLED(CONFIG_AMD_WBRF) +bool acpi_amd_wbrf_supported_producer(struct device *dev); +int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in); +bool acpi_amd_wbrf_supported_consumer(struct device *dev); +int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out); +int amd_wbrf_register_notifier(struct notifier_block *nb); +int amd_wbrf_unregister_notifier(struct notifier_block *nb); +#else +static inline +bool acpi_amd_wbrf_supported_consumer(struct device *dev) +{ + return false; +} + +static inline +int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in) +{ + return -ENODEV; +} + +static inline +bool acpi_amd_wbrf_supported_producer(struct device *dev) +{ + return false; +} +static inline +int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out) +{ + return -ENODEV; +} +static inline +int amd_wbrf_register_notifier(struct notifier_block *nb) +{ + return -ENODEV; +} +static inline +int amd_wbrf_unregister_notifier(struct notifier_block *nb) +{ + return -ENODEV; +} +#endif /* CONFIG_AMD_WBRF */ + +#endif /* _ACPI_AMD_WBRF_H */ diff --git a/include/linux/aer.h b/include/linux/aer.h index 2dd175f5debd..ae0fae70d4bd 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -19,10 +19,10 @@ struct pci_dev; struct aer_header_log_regs { - unsigned int dw0; - unsigned int dw1; - unsigned int dw2; - unsigned int dw3; + u32 dw0; + u32 dw1; + u32 dw2; + u32 dw3; }; struct aer_capability_regs { @@ -42,14 +42,16 @@ struct aer_capability_regs { #if defined(CONFIG_PCIEAER) int pci_aer_clear_nonfatal_status(struct pci_dev *dev); +int pcie_aer_is_native(struct pci_dev *dev); #else static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev) { return -EINVAL; } +static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; } #endif -void cper_print_aer(struct pci_dev *dev, int aer_severity, +void pci_print_aer(struct pci_dev *dev, int aer_severity, struct aer_capability_regs *aer); int cper_severity_to_aer(int cper_severity); void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h deleted file mode 100644 index 421b0fa90d6a..000000000000 --- a/include/linux/amba/clcd-regs.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * David A Rusling - * - * Copyright (C) 2001 ARM Limited - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. 
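The aer.h hunk above renames cper_print_aer() to pci_print_aer() and makes pcie_aer_is_native() available outside the AER core. A hedged sketch of a caller that only logs through the native path when the OS actually owns the AER capability (the wrapper name is illustrative):

static void my_report_aer(struct pci_dev *dev, int severity,
                          struct aer_capability_regs *regs)
{
        /* pcie_aer_is_native() returns 0 when firmware owns AER */
        if (pcie_aer_is_native(dev))
                pci_print_aer(dev, severity, regs);
}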
- */ - -#ifndef AMBA_CLCD_REGS_H -#define AMBA_CLCD_REGS_H - -/* - * CLCD Controller Internal Register addresses - */ -#define CLCD_TIM0 0x00000000 -#define CLCD_TIM1 0x00000004 -#define CLCD_TIM2 0x00000008 -#define CLCD_TIM3 0x0000000c -#define CLCD_UBAS 0x00000010 -#define CLCD_LBAS 0x00000014 - -#define CLCD_PL110_IENB 0x00000018 -#define CLCD_PL110_CNTL 0x0000001c -#define CLCD_PL110_STAT 0x00000020 -#define CLCD_PL110_INTR 0x00000024 -#define CLCD_PL110_UCUR 0x00000028 -#define CLCD_PL110_LCUR 0x0000002C - -#define CLCD_PL111_CNTL 0x00000018 -#define CLCD_PL111_IENB 0x0000001c -#define CLCD_PL111_RIS 0x00000020 -#define CLCD_PL111_MIS 0x00000024 -#define CLCD_PL111_ICR 0x00000028 -#define CLCD_PL111_UCUR 0x0000002c -#define CLCD_PL111_LCUR 0x00000030 - -#define CLCD_PALL 0x00000200 -#define CLCD_PALETTE 0x00000200 - -#define TIM2_PCD_LO_MASK GENMASK(4, 0) -#define TIM2_PCD_LO_BITS 5 -#define TIM2_CLKSEL (1 << 5) -#define TIM2_ACB_MASK GENMASK(10, 6) -#define TIM2_IVS (1 << 11) -#define TIM2_IHS (1 << 12) -#define TIM2_IPC (1 << 13) -#define TIM2_IOE (1 << 14) -#define TIM2_BCD (1 << 26) -#define TIM2_PCD_HI_MASK GENMASK(31, 27) -#define TIM2_PCD_HI_BITS 5 -#define TIM2_PCD_HI_SHIFT 27 - -#define CNTL_LCDEN (1 << 0) -#define CNTL_LCDBPP1 (0 << 1) -#define CNTL_LCDBPP2 (1 << 1) -#define CNTL_LCDBPP4 (2 << 1) -#define CNTL_LCDBPP8 (3 << 1) -#define CNTL_LCDBPP16 (4 << 1) -#define CNTL_LCDBPP16_565 (6 << 1) -#define CNTL_LCDBPP16_444 (7 << 1) -#define CNTL_LCDBPP24 (5 << 1) -#define CNTL_LCDBW (1 << 4) -#define CNTL_LCDTFT (1 << 5) -#define CNTL_LCDMONO8 (1 << 6) -#define CNTL_LCDDUAL (1 << 7) -#define CNTL_BGR (1 << 8) -#define CNTL_BEBO (1 << 9) -#define CNTL_BEPO (1 << 10) -#define CNTL_LCDPWR (1 << 11) -#define CNTL_LCDVCOMP(x) ((x) << 12) -#define CNTL_LDMAFIFOTIME (1 << 15) -#define CNTL_WATERMARK (1 << 16) - -/* ST Microelectronics variant bits */ -#define CNTL_ST_1XBPP_444 0x0 -#define CNTL_ST_1XBPP_5551 (1 << 17) -#define CNTL_ST_1XBPP_565 (1 << 18) -#define CNTL_ST_CDWID_12 0x0 -#define CNTL_ST_CDWID_16 (1 << 19) -#define CNTL_ST_CDWID_18 (1 << 20) -#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20)) -#define CNTL_ST_CEAEN (1 << 21) -#define CNTL_ST_LCDBPP24_PACKED (6 << 1) - -#endif /* AMBA_CLCD_REGS_H */ diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h deleted file mode 100644 index b6e0cbeaf533..000000000000 --- a/include/linux/amba/clcd.h +++ /dev/null @@ -1,290 +0,0 @@ -/* - * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel. - * - * David A Rusling - * - * Copyright (C) 2001 ARM Limited - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. 
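Among the clcd-regs.h definitions removed above, the TIM2 pixel-clock divisor is split across the register: five low bits at 4:0 and five high bits at 31:27. A sketch of packing a divisor into that split field, using only the removed macros (illustrative only; the old driver derived pcd from the bus clock rate):

static u32 clcd_tim2_pcd(u32 pcd)
{
        u32 tim2;

        tim2  = pcd & TIM2_PCD_LO_MASK;                         /* bits 4:0 */
        tim2 |= ((pcd >> TIM2_PCD_LO_BITS) << TIM2_PCD_HI_SHIFT)
                & TIM2_PCD_HI_MASK;                             /* bits 31:27 */
        return tim2;
}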
- */ -#include <linux/fb.h> -#include <linux/amba/clcd-regs.h> - -enum { - /* individual formats */ - CLCD_CAP_RGB444 = (1 << 0), - CLCD_CAP_RGB5551 = (1 << 1), - CLCD_CAP_RGB565 = (1 << 2), - CLCD_CAP_RGB888 = (1 << 3), - CLCD_CAP_BGR444 = (1 << 4), - CLCD_CAP_BGR5551 = (1 << 5), - CLCD_CAP_BGR565 = (1 << 6), - CLCD_CAP_BGR888 = (1 << 7), - - /* connection layouts */ - CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444, - CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551, - CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565, - CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888, - - /* red/blue ordering */ - CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 | - CLCD_CAP_RGB565 | CLCD_CAP_RGB888, - CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 | - CLCD_CAP_BGR565 | CLCD_CAP_BGR888, - - CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB, -}; - -struct backlight_device; - -struct clcd_panel { - struct fb_videomode mode; - signed short width; /* width in mm */ - signed short height; /* height in mm */ - u32 tim2; - u32 tim3; - u32 cntl; - u32 caps; - unsigned int bpp:8, - fixedtimings:1, - grayscale:1; - unsigned int connector; - struct backlight_device *backlight; - /* - * If the B/R lines are switched between the CLCD - * and the panel we need to know this and not try to - * compensate with the BGR bit in the control register. - */ - bool bgr_connection; -}; - -struct clcd_regs { - u32 tim0; - u32 tim1; - u32 tim2; - u32 tim3; - u32 cntl; - unsigned long pixclock; -}; - -struct clcd_fb; - -/* - * the board-type specific routines - */ -struct clcd_board { - const char *name; - - /* - * Optional. Hardware capability flags. - */ - u32 caps; - - /* - * Optional. Check whether the var structure is acceptable - * for this display. - */ - int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var); - - /* - * Compulsory. Decode fb->fb.var into regs->*. In the case of - * fixed timing, set regs->* to the register values required. - */ - void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs); - - /* - * Optional. Disable any extra display hardware. - */ - void (*disable)(struct clcd_fb *); - - /* - * Optional. Enable any extra display hardware. 
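The capability flags above group the per-format bits so that board wiring and panel support can simply be intersected. A sketch of the kind of check the removed driver performed (the helper name is illustrative):

static bool clcd_can_do_565(const struct clcd_fb *fb)
{
        /* effective caps = what the board wiring and the panel both support */
        u32 caps = fb->board->caps & fb->panel->caps;

        return caps & CLCD_CAP_565;     /* RGB565 or BGR565 connection */
}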
- */ - void (*enable)(struct clcd_fb *); - - /* - * Setup platform specific parts of CLCD driver - */ - int (*setup)(struct clcd_fb *); - - /* - * mmap the framebuffer memory - */ - int (*mmap)(struct clcd_fb *, struct vm_area_struct *); - - /* - * Remove platform specific parts of CLCD driver - */ - void (*remove)(struct clcd_fb *); -}; - -struct amba_device; -struct clk; - -/* this data structure describes each frame buffer device we find */ -struct clcd_fb { - struct fb_info fb; - struct amba_device *dev; - struct clk *clk; - struct clcd_panel *panel; - struct clcd_board *board; - void *board_data; - void __iomem *regs; - u16 off_ienb; - u16 off_cntl; - u32 clcd_cntl; - u32 cmap[16]; - bool clk_enabled; -}; - -static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) -{ - struct fb_var_screeninfo *var = &fb->fb.var; - u32 val, cpl; - - /* - * Program the CLCD controller registers and start the CLCD - */ - val = ((var->xres / 16) - 1) << 2; - val |= (var->hsync_len - 1) << 8; - val |= (var->right_margin - 1) << 16; - val |= (var->left_margin - 1) << 24; - regs->tim0 = val; - - val = var->yres; - if (fb->panel->cntl & CNTL_LCDDUAL) - val /= 2; - val -= 1; - val |= (var->vsync_len - 1) << 10; - val |= var->lower_margin << 16; - val |= var->upper_margin << 24; - regs->tim1 = val; - - val = fb->panel->tim2; - val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS; - val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS; - - cpl = var->xres_virtual; - if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */ - /* / 1 */; - else if (!var->grayscale) /* STN color */ - cpl = cpl * 8 / 3; - else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */ - cpl /= 8; - else /* STN monochrome, 4bit */ - cpl /= 4; - - regs->tim2 = val | ((cpl - 1) << 16); - - regs->tim3 = fb->panel->tim3; - - val = fb->panel->cntl; - if (var->grayscale) - val |= CNTL_LCDBW; - - if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) { - /* - * if board and panel supply capabilities, we can support - * changing BGR/RGB depending on supplied parameters. Here - * we switch to what the framebuffer is providing if need - * be, so if the framebuffer is BGR but the display connection - * is RGB (first case) we switch it around. Vice versa mutatis - * mutandis if the framebuffer is RGB but the display connection - * is BGR, we flip it around. - */ - if (var->red.offset == 0) - val &= ~CNTL_BGR; - else - val |= CNTL_BGR; - if (fb->panel->bgr_connection) - val ^= CNTL_BGR; - } - - switch (var->bits_per_pixel) { - case 1: - val |= CNTL_LCDBPP1; - break; - case 2: - val |= CNTL_LCDBPP2; - break; - case 4: - val |= CNTL_LCDBPP4; - break; - case 8: - val |= CNTL_LCDBPP8; - break; - case 16: - /* - * PL110 cannot choose between 5551 and 565 modes in its - * control register. It is possible to use 565 with - * custom external wiring. 
- */ - if (amba_part(fb->dev) == 0x110 || - var->green.length == 5) - val |= CNTL_LCDBPP16; - else if (var->green.length == 6) - val |= CNTL_LCDBPP16_565; - else - val |= CNTL_LCDBPP16_444; - break; - case 32: - val |= CNTL_LCDBPP24; - break; - } - - regs->cntl = val; - regs->pixclock = var->pixclock; -} - -static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var) -{ - var->xres_virtual = var->xres = (var->xres + 15) & ~15; - var->yres_virtual = var->yres = (var->yres + 1) & ~1; - -#define CHECK(e,l,h) (var->e < l || var->e > h) - if (CHECK(right_margin, (5+1), 256) || /* back porch */ - CHECK(left_margin, (5+1), 256) || /* front porch */ - CHECK(hsync_len, (5+1), 256) || - var->xres > 4096 || - var->lower_margin > 255 || /* back porch */ - var->upper_margin > 255 || /* front porch */ - var->vsync_len > 32 || - var->yres > 1024) - return -EINVAL; -#undef CHECK - - /* single panel mode: PCD = max(PCD, 1) */ - /* dual panel mode: PCD = max(PCD, 5) */ - - /* - * You can't change the grayscale setting, and - * we can only do non-interlaced video. - */ - if (var->grayscale != fb->fb.var.grayscale || - (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED) - return -EINVAL; - -#define CHECK(e) (var->e != fb->fb.var.e) - if (fb->panel->fixedtimings && - (CHECK(xres) || - CHECK(yres) || - CHECK(bits_per_pixel) || - CHECK(pixclock) || - CHECK(left_margin) || - CHECK(right_margin) || - CHECK(upper_margin) || - CHECK(lower_margin) || - CHECK(hsync_len) || - CHECK(vsync_len) || - CHECK(sync))) - return -EINVAL; -#undef CHECK - - var->nonstd = 0; - var->accel_flags = 0; - - return 0; -} diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index a1307b58cc2c..9120de05ead0 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -10,6 +10,11 @@ #ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H #define ASM_ARM_HARDWARE_SERIAL_AMBA_H +#ifndef __ASSEMBLY__ +#include <linux/bitfield.h> +#include <linux/bits.h> +#endif + #include <linux/types.h> /* ------------------------------------------------------------------------------- @@ -70,141 +75,145 @@ #define ZX_UART011_ICR 0x4c #define ZX_UART011_DMACR 0x50 -#define UART011_DR_OE (1 << 11) -#define UART011_DR_BE (1 << 10) -#define UART011_DR_PE (1 << 9) -#define UART011_DR_FE (1 << 8) - -#define UART01x_RSR_OE 0x08 -#define UART01x_RSR_BE 0x04 -#define UART01x_RSR_PE 0x02 -#define UART01x_RSR_FE 0x01 - -#define UART011_FR_RI 0x100 -#define UART011_FR_TXFE 0x080 -#define UART011_FR_RXFF 0x040 -#define UART01x_FR_TXFF 0x020 -#define UART01x_FR_RXFE 0x010 -#define UART01x_FR_BUSY 0x008 -#define UART01x_FR_DCD 0x004 -#define UART01x_FR_DSR 0x002 -#define UART01x_FR_CTS 0x001 +#define UART011_DR_OE BIT(11) +#define UART011_DR_BE BIT(10) +#define UART011_DR_PE BIT(9) +#define UART011_DR_FE BIT(8) + +#define UART01x_RSR_OE BIT(3) +#define UART01x_RSR_BE BIT(2) +#define UART01x_RSR_PE BIT(1) +#define UART01x_RSR_FE BIT(0) + +#define UART011_FR_RI BIT(8) +#define UART011_FR_TXFE BIT(7) +#define UART011_FR_RXFF BIT(6) +#define UART01x_FR_TXFF (1 << 5) /* used in ASM */ +#define UART01x_FR_RXFE BIT(4) +#define UART01x_FR_BUSY (1 << 3) /* used in ASM */ +#define UART01x_FR_DCD BIT(2) +#define UART01x_FR_DSR BIT(1) +#define UART01x_FR_CTS BIT(0) #define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY) /* * Some bits of Flag Register on ZTE device have different position from * standard ones. 
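clcdfb_check() above rounds xres up to a multiple of 16 with the classic power-of-two mask idiom, (x + 15) & ~15. The same idiom in isolation:

/* Round x up to the next multiple of 16 (valid for any power of two). */
static inline unsigned int round_up_to_16(unsigned int x)
{
        return (x + 15) & ~15u;
}

/* round_up_to_16(1017) == 1024; the kernel's ALIGN(x, 16) expands to the same mask. */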
*/ -#define ZX_UART01x_FR_BUSY 0x100 -#define ZX_UART01x_FR_DSR 0x008 -#define ZX_UART01x_FR_CTS 0x002 -#define ZX_UART011_FR_RI 0x001 - -#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */ -#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */ -#define UART011_CR_OUT2 0x2000 /* OUT2 */ -#define UART011_CR_OUT1 0x1000 /* OUT1 */ -#define UART011_CR_RTS 0x0800 /* RTS */ -#define UART011_CR_DTR 0x0400 /* DTR */ -#define UART011_CR_RXE 0x0200 /* receive enable */ -#define UART011_CR_TXE 0x0100 /* transmit enable */ -#define UART011_CR_LBE 0x0080 /* loopback enable */ -#define UART010_CR_RTIE 0x0040 -#define UART010_CR_TIE 0x0020 -#define UART010_CR_RIE 0x0010 -#define UART010_CR_MSIE 0x0008 -#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */ -#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */ -#define UART01x_CR_SIREN 0x0002 /* SIR enable */ -#define UART01x_CR_UARTEN 0x0001 /* UART enable */ - -#define UART011_LCRH_SPS 0x80 +#define ZX_UART01x_FR_BUSY BIT(8) +#define ZX_UART01x_FR_DSR BIT(3) +#define ZX_UART01x_FR_CTS BIT(1) +#define ZX_UART011_FR_RI BIT(0) + +#define UART011_CR_CTSEN BIT(15) /* CTS hardware flow control */ +#define UART011_CR_RTSEN BIT(14) /* RTS hardware flow control */ +#define UART011_CR_OUT2 BIT(13) /* OUT2 */ +#define UART011_CR_OUT1 BIT(12) /* OUT1 */ +#define UART011_CR_RTS BIT(11) /* RTS */ +#define UART011_CR_DTR BIT(10) /* DTR */ +#define UART011_CR_RXE BIT(9) /* receive enable */ +#define UART011_CR_TXE BIT(8) /* transmit enable */ +#define UART011_CR_LBE BIT(7) /* loopback enable */ +#define UART010_CR_RTIE BIT(6) +#define UART010_CR_TIE BIT(5) +#define UART010_CR_RIE BIT(4) +#define UART010_CR_MSIE BIT(3) +#define ST_UART011_CR_OVSFACT BIT(3) /* Oversampling factor */ +#define UART01x_CR_IIRLP BIT(2) /* SIR low power mode */ +#define UART01x_CR_SIREN BIT(1) /* SIR enable */ +#define UART01x_CR_UARTEN BIT(0) /* UART enable */ + +#define UART011_LCRH_SPS BIT(7) #define UART01x_LCRH_WLEN_8 0x60 #define UART01x_LCRH_WLEN_7 0x40 #define UART01x_LCRH_WLEN_6 0x20 #define UART01x_LCRH_WLEN_5 0x00 -#define UART01x_LCRH_FEN 0x10 -#define UART01x_LCRH_STP2 0x08 -#define UART01x_LCRH_EPS 0x04 -#define UART01x_LCRH_PEN 0x02 -#define UART01x_LCRH_BRK 0x01 - -#define ST_UART011_DMAWM_RX_1 (0 << 3) -#define ST_UART011_DMAWM_RX_2 (1 << 3) -#define ST_UART011_DMAWM_RX_4 (2 << 3) -#define ST_UART011_DMAWM_RX_8 (3 << 3) -#define ST_UART011_DMAWM_RX_16 (4 << 3) -#define ST_UART011_DMAWM_RX_32 (5 << 3) -#define ST_UART011_DMAWM_RX_48 (6 << 3) -#define ST_UART011_DMAWM_TX_1 0 -#define ST_UART011_DMAWM_TX_2 1 -#define ST_UART011_DMAWM_TX_4 2 -#define ST_UART011_DMAWM_TX_8 3 -#define ST_UART011_DMAWM_TX_16 4 -#define ST_UART011_DMAWM_TX_32 5 -#define ST_UART011_DMAWM_TX_48 6 - -#define UART010_IIR_RTIS 0x08 -#define UART010_IIR_TIS 0x04 -#define UART010_IIR_RIS 0x02 -#define UART010_IIR_MIS 0x01 - -#define UART011_IFLS_RX1_8 (0 << 3) -#define UART011_IFLS_RX2_8 (1 << 3) -#define UART011_IFLS_RX4_8 (2 << 3) -#define UART011_IFLS_RX6_8 (3 << 3) -#define UART011_IFLS_RX7_8 (4 << 3) -#define UART011_IFLS_TX1_8 (0 << 0) -#define UART011_IFLS_TX2_8 (1 << 0) -#define UART011_IFLS_TX4_8 (2 << 0) -#define UART011_IFLS_TX6_8 (3 << 0) -#define UART011_IFLS_TX7_8 (4 << 0) +#define UART01x_LCRH_FEN BIT(4) +#define UART01x_LCRH_STP2 BIT(3) +#define UART01x_LCRH_EPS BIT(2) +#define UART01x_LCRH_PEN BIT(1) +#define UART01x_LCRH_BRK BIT(0) + +#define ST_UART011_DMAWM_RX GENMASK(5, 3) +#define ST_UART011_DMAWM_RX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 0) +#define 
ST_UART011_DMAWM_RX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 1) +#define ST_UART011_DMAWM_RX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 2) +#define ST_UART011_DMAWM_RX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 3) +#define ST_UART011_DMAWM_RX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 4) +#define ST_UART011_DMAWM_RX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 5) +#define ST_UART011_DMAWM_RX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 6) +#define ST_UART011_DMAWM_TX GENMASK(2, 0) +#define ST_UART011_DMAWM_TX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 0) +#define ST_UART011_DMAWM_TX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 1) +#define ST_UART011_DMAWM_TX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 2) +#define ST_UART011_DMAWM_TX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 3) +#define ST_UART011_DMAWM_TX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 4) +#define ST_UART011_DMAWM_TX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 5) +#define ST_UART011_DMAWM_TX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 6) + +#define UART010_IIR_RTIS BIT(3) +#define UART010_IIR_TIS BIT(2) +#define UART010_IIR_RIS BIT(1) +#define UART010_IIR_MIS BIT(0) + +#define UART011_IFLS_RXIFLSEL GENMASK(5, 3) +#define UART011_IFLS_RX1_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 0) +#define UART011_IFLS_RX2_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 1) +#define UART011_IFLS_RX4_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 2) +#define UART011_IFLS_RX6_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 3) +#define UART011_IFLS_RX7_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 4) +#define UART011_IFLS_TXIFLSEL GENMASK(2, 0) +#define UART011_IFLS_TX1_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 0) +#define UART011_IFLS_TX2_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 1) +#define UART011_IFLS_TX4_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 2) +#define UART011_IFLS_TX6_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 3) +#define UART011_IFLS_TX7_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 4) /* special values for ST vendor with deeper fifo */ -#define UART011_IFLS_RX_HALF (5 << 3) -#define UART011_IFLS_TX_HALF (5 << 0) - -#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */ -#define UART011_BEIM (1 << 9) /* break error interrupt mask */ -#define UART011_PEIM (1 << 8) /* parity error interrupt mask */ -#define UART011_FEIM (1 << 7) /* framing error interrupt mask */ -#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */ -#define UART011_TXIM (1 << 5) /* transmit interrupt mask */ -#define UART011_RXIM (1 << 4) /* receive interrupt mask */ -#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */ -#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */ -#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */ -#define UART011_RIMIM (1 << 0) /* RI interrupt mask */ - -#define UART011_OEIS (1 << 10) /* overrun error interrupt status */ -#define UART011_BEIS (1 << 9) /* break error interrupt status */ -#define UART011_PEIS (1 << 8) /* parity error interrupt status */ -#define UART011_FEIS (1 << 7) /* framing error interrupt status */ -#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */ -#define UART011_TXIS (1 << 5) /* transmit interrupt status */ -#define UART011_RXIS (1 << 4) /* receive interrupt status */ -#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */ -#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */ -#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */ -#define UART011_RIMIS (1 << 0) /* RI interrupt status */ - -#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */ -#define UART011_BEIC (1 << 9) /* break error interrupt 
clear */ -#define UART011_PEIC (1 << 8) /* parity error interrupt clear */ -#define UART011_FEIC (1 << 7) /* framing error interrupt clear */ -#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */ -#define UART011_TXIC (1 << 5) /* transmit interrupt clear */ -#define UART011_RXIC (1 << 4) /* receive interrupt clear */ -#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */ -#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */ -#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */ -#define UART011_RIMIC (1 << 0) /* RI interrupt clear */ - -#define UART011_DMAONERR (1 << 2) /* disable dma on error */ -#define UART011_TXDMAE (1 << 1) /* enable transmit dma */ -#define UART011_RXDMAE (1 << 0) /* enable receive dma */ - -#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE) -#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) +#define UART011_IFLS_RX_HALF FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 5) +#define UART011_IFLS_TX_HALF FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 5) + +#define UART011_OEIM BIT(10) /* overrun error interrupt mask */ +#define UART011_BEIM BIT(9) /* break error interrupt mask */ +#define UART011_PEIM BIT(8) /* parity error interrupt mask */ +#define UART011_FEIM BIT(7) /* framing error interrupt mask */ +#define UART011_RTIM BIT(6) /* receive timeout interrupt mask */ +#define UART011_TXIM BIT(5) /* transmit interrupt mask */ +#define UART011_RXIM BIT(4) /* receive interrupt mask */ +#define UART011_DSRMIM BIT(3) /* DSR interrupt mask */ +#define UART011_DCDMIM BIT(2) /* DCD interrupt mask */ +#define UART011_CTSMIM BIT(1) /* CTS interrupt mask */ +#define UART011_RIMIM BIT(0) /* RI interrupt mask */ + +#define UART011_OEIS BIT(10) /* overrun error interrupt status */ +#define UART011_BEIS BIT(9) /* break error interrupt status */ +#define UART011_PEIS BIT(8) /* parity error interrupt status */ +#define UART011_FEIS BIT(7) /* framing error interrupt status */ +#define UART011_RTIS BIT(6) /* receive timeout interrupt status */ +#define UART011_TXIS BIT(5) /* transmit interrupt status */ +#define UART011_RXIS BIT(4) /* receive interrupt status */ +#define UART011_DSRMIS BIT(3) /* DSR interrupt status */ +#define UART011_DCDMIS BIT(2) /* DCD interrupt status */ +#define UART011_CTSMIS BIT(1) /* CTS interrupt status */ +#define UART011_RIMIS BIT(0) /* RI interrupt status */ + +#define UART011_OEIC BIT(10) /* overrun error interrupt clear */ +#define UART011_BEIC BIT(9) /* break error interrupt clear */ +#define UART011_PEIC BIT(8) /* parity error interrupt clear */ +#define UART011_FEIC BIT(7) /* framing error interrupt clear */ +#define UART011_RTIC BIT(6) /* receive timeout interrupt clear */ +#define UART011_TXIC BIT(5) /* transmit interrupt clear */ +#define UART011_RXIC BIT(4) /* receive interrupt clear */ +#define UART011_DSRMIC BIT(3) /* DSR interrupt clear */ +#define UART011_DCDMIC BIT(2) /* DCD interrupt clear */ +#define UART011_CTSMIC BIT(1) /* CTS interrupt clear */ +#define UART011_RIMIC BIT(0) /* RI interrupt clear */ + +#define UART011_DMAONERR BIT(2) /* disable dma on error */ +#define UART011_TXDMAE BIT(1) /* enable transmit dma */ +#define UART011_RXDMAE BIT(0) /* enable receive dma */ + +#define UART01x_RSR_ANY (UART01x_RSR_OE | UART01x_RSR_BE | UART01x_RSR_PE | UART01x_RSR_FE) +#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD | UART01x_FR_DSR | UART01x_FR_CTS) #ifndef __ASSEMBLY__ struct amba_device; /* in uncompress this is included but amba/bus.h is not */ @@ -220,8 +229,8 @@ struct 
amba_pl011_data { bool dma_rx_poll_enable; unsigned int dma_rx_poll_rate; unsigned int dma_rx_poll_timeout; - void (*init) (void); - void (*exit) (void); + void (*init)(void); + void (*exit)(void); }; #endif diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 99a5201d9e62..dc7ed2f46886 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -33,126 +33,6 @@ struct pci_dev; extern int amd_iommu_detect(void); -/** - * amd_iommu_init_device() - Init device for use with IOMMUv2 driver - * @pdev: The PCI device to initialize - * @pasids: Number of PASIDs to support for this device - * - * This function does all setup for the device pdev so that it can be - * used with IOMMUv2. - * Returns 0 on success or negative value on error. - */ -extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids); - -/** - * amd_iommu_free_device() - Free all IOMMUv2 related device resources - * and disable IOMMUv2 usage for this device - * @pdev: The PCI device to disable IOMMUv2 usage for' - */ -extern void amd_iommu_free_device(struct pci_dev *pdev); - -/** - * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device - * @pdev: The PCI device to bind the task to - * @pasid: The PASID on the device the task should be bound to - * @task: the task to bind - * - * The function returns 0 on success or a negative value on error. - */ -extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid, - struct task_struct *task); - -/** - * amd_iommu_unbind_pasid() - Unbind a PASID from its task on - * a device - * @pdev: The device of the PASID - * @pasid: The PASID to unbind - * - * When this function returns the device is no longer using the PASID - * and the PASID is no longer bound to its task. - */ -extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid); - -/** - * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed - * PRI requests - * @pdev: The PCI device the call-back should be registered for - * @cb: The call-back function - * - * The IOMMUv2 driver invokes this call-back when it is unable to - * successfully handle a PRI request. The device driver can then decide - * which PRI response the device should see. Possible return values for - * the call-back are: - * - * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device - * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device - * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device, - * the device is required to disable - * PRI when it receives this response - * - * The function returns 0 on success or negative value on error. 
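The amba/serial.h hunks above replace open-coded '(value << shift)' constants with GENMASK() plus FIELD_PREP_CONST(). Unlike FIELD_PREP(), the _CONST variant is usable in constant expressions such as initializers and case labels. A self-contained sketch of the equivalence (MY_FIELD is a made-up field, not from the patch):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/build_bug.h>

#define MY_FIELD        GENMASK(5, 3)                   /* a 3-bit field */
#define MY_FIELD_VAL2   FIELD_PREP_CONST(MY_FIELD, 2)   /* == (2 << 3) */

static_assert(MY_FIELD_VAL2 == 0x10);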
- */ -#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0 -#define AMD_IOMMU_INV_PRI_RSP_INVALID 1 -#define AMD_IOMMU_INV_PRI_RSP_FAIL 2 - -typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, - u32 pasid, - unsigned long address, - u16); - -extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, - amd_iommu_invalid_ppr_cb cb); - -#define PPR_FAULT_EXEC (1 << 1) -#define PPR_FAULT_READ (1 << 2) -#define PPR_FAULT_WRITE (1 << 5) -#define PPR_FAULT_USER (1 << 6) -#define PPR_FAULT_RSVD (1 << 7) -#define PPR_FAULT_GN (1 << 8) - -/** - * amd_iommu_device_info() - Get information about IOMMUv2 support of a - * PCI device - * @pdev: PCI device to query information from - * @info: A pointer to an amd_iommu_device_info structure which will contain - * the information about the PCI device - * - * Returns 0 on success, negative value on error - */ - -#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */ -#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */ -#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */ -#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution - on memory pages */ -#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request - super-user privileges */ - -struct amd_iommu_device_info { - int max_pasids; - u32 flags; -}; - -extern int amd_iommu_device_info(struct pci_dev *pdev, - struct amd_iommu_device_info *info); - -/** - * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating - * a pasid context. This call-back is - * invoked when the IOMMUv2 driver needs to - * invalidate a PASID context, for example - * because the task that is bound to that - * context is about to exit. - * - * @pdev: The PCI device the call-back should be registered for - * @cb: The call-back function - */ - -typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid); - -extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, - amd_iommu_invalidate_ctx cb); #else /* CONFIG_AMD_IOMMU */ static inline int amd_iommu_detect(void) { return -ENODEV; } diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h new file mode 100644 index 000000000000..b4f818205216 --- /dev/null +++ b/include/linux/amd-pmf-io.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AMD Platform Management Framework Interface + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. 
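The new amd-pmf-io.h interface introduced just below exposes a single entry point, amd_get_sfh_info(). A hedged sketch of how a PMF-side caller might query human presence from the MP2 firmware (the wrapper name is illustrative):

#include <linux/amd-pmf-io.h>

static bool my_pmf_user_present(void)
{
        struct amd_sfh_info info = {};

        if (amd_get_sfh_info(&info, MT_HPD))
                return false;           /* MP2 query failed */

        return info.user_present == SFH_USER_PRESENT;
}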
+ * + * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + * Basavaraj Natikar <Basavaraj.Natikar@amd.com> + */ + +#ifndef AMD_PMF_IO_H +#define AMD_PMF_IO_H + +#include <linux/types.h> + +/** + * enum sfh_message_type - Query the SFH message type + * @MT_HPD: Message ID to know the Human presence info from MP2 FW + * @MT_ALS: Message ID to know the Ambient light info from MP2 FW + */ +enum sfh_message_type { + MT_HPD, + MT_ALS, +}; + +/** + * enum sfh_hpd_info - Query the Human presence information + * @SFH_NOT_DETECTED: Check the HPD connection information from MP2 FW + * @SFH_USER_PRESENT: Check if the user is present from HPD sensor + * @SFH_USER_AWAY: Check if the user is away from HPD sensor + */ +enum sfh_hpd_info { + SFH_NOT_DETECTED, + SFH_USER_PRESENT, + SFH_USER_AWAY, +}; + +/** + * struct amd_sfh_info - get HPD sensor info from MP2 FW + * @ambient_light: Populates the ambient light information + * @user_present: Populates the user presence information + */ +struct amd_sfh_info { + u32 ambient_light; + u8 user_present; +}; + +int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op); +#endif diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h index 446394f84606..6ad02ad9c7b4 100644 --- a/include/linux/amd-pstate.h +++ b/include/linux/amd-pstate.h @@ -70,6 +70,10 @@ struct amd_cpudata { u32 nominal_perf; u32 lowest_nonlinear_perf; u32 lowest_perf; + u32 min_limit_perf; + u32 max_limit_perf; + u32 min_limit_freq; + u32 max_limit_freq; u32 max_freq; u32 min_freq; diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index 5deaddbd7927..93a5f16d03f3 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -15,13 +15,13 @@ struct inode; struct file *anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags); -struct file *anon_inode_getfile_secure(const char *name, +struct file *anon_inode_create_getfile(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode); int anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags); -int anon_inode_getfd_secure(const char *name, +int anon_inode_create_getfd(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode); diff --git a/include/linux/apple-mailbox.h b/include/linux/apple-mailbox.h deleted file mode 100644 index 720fbb70294a..000000000000 --- a/include/linux/apple-mailbox.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ -/* - * Apple mailbox message format - * - * Copyright (C) 2021 The Asahi Linux Contributors - */ - -#ifndef _LINUX_APPLE_MAILBOX_H_ -#define _LINUX_APPLE_MAILBOX_H_ - -#include <linux/types.h> - -/* encodes a single 96bit message sent over the single channel */ -struct apple_mbox_msg { - u64 msg0; - u32 msg1; -}; - -#endif diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index a07b510e7dc5..a63d61ca55af 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -27,6 +27,13 @@ static inline unsigned long topology_get_cpu_scale(int cpu) void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); +DECLARE_PER_CPU(unsigned long, capacity_freq_ref); + +static inline unsigned long topology_get_freq_ref(int cpu) +{ + return per_cpu(capacity_freq_ref, cpu); +} + DECLARE_PER_CPU(unsigned long, arch_freq_scale); static inline unsigned long topology_get_freq_scale(int cpu) @@ 
-92,6 +99,7 @@ void update_siblings_masks(unsigned int cpu); void remove_cpu_topology(unsigned int cpuid); void reset_cpu_topology(void); int parse_acpi_topology(void); +void freq_inv_set_max_ratio(int cpu, u64 max_rate); #endif #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 7c67c17321d4..083f85653716 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -67,6 +67,8 @@ #define ARM_SMCCC_VERSION_1_3 0x10003 #define ARM_SMCCC_1_3_SVE_HINT 0x10000 +#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT + #define ARM_SMCCC_VERSION_FUNC_ID \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index cc060da51bec..3d0fde57ba90 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -6,6 +6,7 @@ #ifndef _LINUX_ARM_FFA_H #define _LINUX_ARM_FFA_H +#include <linux/bitfield.h> #include <linux/device.h> #include <linux/module.h> #include <linux/types.h> @@ -20,6 +21,7 @@ #define FFA_ERROR FFA_SMC_32(0x60) #define FFA_SUCCESS FFA_SMC_32(0x61) +#define FFA_FN64_SUCCESS FFA_SMC_64(0x61) #define FFA_INTERRUPT FFA_SMC_32(0x62) #define FFA_VERSION FFA_SMC_32(0x63) #define FFA_FEATURES FFA_SMC_32(0x64) @@ -54,6 +56,23 @@ #define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A) #define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B) #define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C) +#define FFA_NOTIFICATION_BITMAP_CREATE FFA_SMC_32(0x7D) +#define FFA_NOTIFICATION_BITMAP_DESTROY FFA_SMC_32(0x7E) +#define FFA_NOTIFICATION_BIND FFA_SMC_32(0x7F) +#define FFA_NOTIFICATION_UNBIND FFA_SMC_32(0x80) +#define FFA_NOTIFICATION_SET FFA_SMC_32(0x81) +#define FFA_NOTIFICATION_GET FFA_SMC_32(0x82) +#define FFA_NOTIFICATION_INFO_GET FFA_SMC_32(0x83) +#define FFA_FN64_NOTIFICATION_INFO_GET FFA_SMC_64(0x83) +#define FFA_RX_ACQUIRE FFA_SMC_32(0x84) +#define FFA_SPM_ID_GET FFA_SMC_32(0x85) +#define FFA_MSG_SEND2 FFA_SMC_32(0x86) +#define FFA_SECONDARY_EP_REGISTER FFA_SMC_32(0x87) +#define FFA_FN64_SECONDARY_EP_REGISTER FFA_SMC_64(0x87) +#define FFA_MEM_PERM_GET FFA_SMC_32(0x88) +#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88) +#define FFA_MEM_PERM_SET FFA_SMC_32(0x89) +#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89) /* * For some calls it is necessary to use SMC64 to pass or return 64-bit values. @@ -76,6 +95,7 @@ #define FFA_RET_DENIED (-6) #define FFA_RET_RETRY (-7) #define FFA_RET_ABORTED (-8) +#define FFA_RET_NO_DATA (-9) /* FFA version encoding */ #define FFA_MAJOR_VERSION_MASK GENMASK(30, 16) @@ -86,6 +106,7 @@ (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \ FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor))) #define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0) +#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1) /** * FF-A specification mentions explicitly about '4K pages'. This should @@ -188,6 +209,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; } #define module_ffa_driver(__ffa_driver) \ module_driver(__ffa_driver, ffa_register, ffa_unregister) +extern struct bus_type ffa_bus_type; + /* FFA transport related */ struct ffa_partition_info { u16 id; @@ -278,8 +301,8 @@ struct ffa_mem_region { #define FFA_MEM_NON_SHAREABLE (0) #define FFA_MEM_OUTER_SHAREABLE (2) #define FFA_MEM_INNER_SHAREABLE (3) - u8 attributes; - u8 reserved_0; + /* Memory region attributes, upper byte MBZ pre v1.1 */ + u16 attributes; /* * Clear memory region contents after unmapping it from the sender and * before mapping it for any receiver. @@ -317,27 +340,41 @@ struct ffa_mem_region { * memory region. 
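The per-CPU capacity_freq_ref above gives generic code a reference frequency for frequency-invariant load tracking: roughly, scale = cur_freq / ref_freq in SCHED_CAPACITY_SCALE fixed point. A sketch that mirrors the generic topology code's ratio (the helper name is illustrative and kHz units are assumed):

#include <linux/arch_topology.h>
#include <linux/sched/topology.h>       /* SCHED_CAPACITY_SHIFT */

static unsigned long my_freq_scale(int cpu, unsigned long cur_khz)
{
        unsigned long ref_khz = topology_get_freq_ref(cpu);

        /* ~SCHED_CAPACITY_SCALE when running at the reference frequency */
        return (cur_khz << SCHED_CAPACITY_SHIFT) / ref_khz;
}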
*/ u64 tag; - u32 reserved_1; + /* Size of each endpoint memory access descriptor, MBZ pre v1.1 */ + u32 ep_mem_size; /* * The number of `ffa_mem_region_attributes` entries included in this * transaction. */ u32 ep_count; /* - * An array of endpoint memory access descriptors. - * Each one specifies a memory region offset, an endpoint and the - * attributes with which this memory region should be mapped in that - * endpoint's page table. + * 16-byte aligned offset from the base address of this descriptor + * to the first element of the endpoint memory access descriptor array + * Valid only from v1.1 */ - struct ffa_mem_region_attributes ep_mem_access[]; + u32 ep_mem_offset; + /* MBZ, valid only from v1.1 */ + u32 reserved[3]; }; -#define COMPOSITE_OFFSET(x) \ - (offsetof(struct ffa_mem_region, ep_mem_access[x])) #define CONSTITUENTS_OFFSET(x) \ (offsetof(struct ffa_composite_mem_region, constituents[x])) -#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \ - (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y)) + +static inline u32 +ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version) +{ + u32 offset = count * sizeof(struct ffa_mem_region_attributes); + /* + * Earlier to v1.1, the endpoint memory descriptor array started at + * offset 32(i.e. offset of ep_mem_offset in the current structure) + */ + if (ffa_version <= FFA_VERSION_1_0) + offset += offsetof(struct ffa_mem_region, ep_mem_offset); + else + offset += sizeof(struct ffa_mem_region); + + return offset; +} struct ffa_mem_ops_args { bool use_txbuf; @@ -367,10 +404,30 @@ struct ffa_mem_ops { int (*memory_lend)(struct ffa_mem_ops_args *args); }; +struct ffa_cpu_ops { + int (*run)(struct ffa_device *dev, u16 vcpu); +}; + +typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data); +typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data); + +struct ffa_notifier_ops { + int (*sched_recv_cb_register)(struct ffa_device *dev, + ffa_sched_recv_cb cb, void *cb_data); + int (*sched_recv_cb_unregister)(struct ffa_device *dev); + int (*notify_request)(struct ffa_device *dev, bool per_vcpu, + ffa_notifier_cb cb, void *cb_data, int notify_id); + int (*notify_relinquish)(struct ffa_device *dev, int notify_id); + int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu, + u16 vcpu); +}; + struct ffa_ops { const struct ffa_info_ops *info_ops; const struct ffa_msg_ops *msg_ops; const struct ffa_mem_ops *mem_ops; + const struct ffa_cpu_ops *cpu_ops; + const struct ffa_notifier_ops *notifier_ops; }; #endif /* _LINUX_ARM_FFA_H */ diff --git a/include/linux/array_size.h b/include/linux/array_size.h new file mode 100644 index 000000000000..06d7d83196ca --- /dev/null +++ b/include/linux/array_size.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ARRAY_SIZE_H +#define _LINUX_ARRAY_SIZE_H + +#include <linux/compiler.h> + +/** + * ARRAY_SIZE - get the number of elements in array @arr + * @arr: array to be sized + */ +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) + +#endif /* _LINUX_ARRAY_SIZE_H */ diff --git a/include/linux/async.h b/include/linux/async.h index cce4ad31e8fc..33c9ff4afb49 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev) return async_schedule_node(func, dev, dev_to_node(dev)); } +bool async_schedule_dev_nocall(async_func_t func, struct device *dev); + /** * async_schedule_dev_domain - A device specific version of async_schedule_domain * @func: function 
to execute asynchronously diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index 18f5744dfb5d..5e95faa959c4 100644 --- a/include/linux/atomic/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h @@ -428,6 +428,19 @@ extern void raw_cmpxchg128_relaxed_not_implemented(void); #define raw_sync_cmpxchg arch_sync_cmpxchg +#ifdef arch_sync_try_cmpxchg +#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg +#else +#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif + /** * raw_atomic_read() - atomic load with relaxed ordering * @v: pointer to atomic_t @@ -459,8 +472,6 @@ raw_atomic_read_acquire(const atomic_t *v) { #if defined(arch_atomic_read_acquire) return arch_atomic_read_acquire(v); -#elif defined(arch_atomic_read) - return arch_atomic_read(v); #else int ret; @@ -508,8 +519,6 @@ raw_atomic_set_release(atomic_t *v, int i) { #if defined(arch_atomic_set_release) arch_atomic_set_release(v, i); -#elif defined(arch_atomic_set) - arch_atomic_set(v, i); #else if (__native_word(atomic_t)) { smp_store_release(&(v)->counter, i); @@ -2575,8 +2584,6 @@ raw_atomic64_read_acquire(const atomic64_t *v) { #if defined(arch_atomic64_read_acquire) return arch_atomic64_read_acquire(v); -#elif defined(arch_atomic64_read) - return arch_atomic64_read(v); #else s64 ret; @@ -2624,8 +2631,6 @@ raw_atomic64_set_release(atomic64_t *v, s64 i) { #if defined(arch_atomic64_set_release) arch_atomic64_set_release(v, i); -#elif defined(arch_atomic64_set) - arch_atomic64_set(v, i); #else if (__native_word(atomic64_t)) { smp_store_release(&(v)->counter, i); @@ -4657,4 +4662,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v) } #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d +// eec048affea735b8464f58e6d96992101f8f85f1 diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h index d401b406ef7c..54d7bbe0aeaa 100644 --- a/include/linux/atomic/atomic-instrumented.h +++ b/include/linux/atomic/atomic-instrumented.h @@ -4998,6 +4998,14 @@ atomic_long_dec_if_positive(atomic_long_t *v) raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) +#define sync_try_cmpxchg(ptr, ...) 
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kcsan_mb(); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ -// 1568f875fef72097413caab8339120c065a39aa4 +// 2cc4bc990fef44d3836ec108f11b610f3f438184 diff --git a/include/linux/audit.h b/include/linux/audit.h index 6a3a9e122bb5..0050ef288ab3 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -36,6 +36,7 @@ struct mqstat; struct audit_watch; struct audit_tree; struct sk_buff; +struct kern_ipc_perm; struct audit_krule { u32 pflags; @@ -117,6 +118,8 @@ enum audit_nfcfgop { AUDIT_NFT_OP_OBJ_RESET, AUDIT_NFT_OP_FLOWTABLE_REGISTER, AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, + AUDIT_NFT_OP_SETELEM_RESET, + AUDIT_NFT_OP_RULE_RESET, AUDIT_NFT_OP_INVALID, }; diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index d0807ad43f93..8e177b67e82f 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -4,6 +4,11 @@ #ifndef _VIRTCHNL_H_ #define _VIRTCHNL_H_ +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/overflow.h> +#include <uapi/linux/if_ether.h> + /* Description: * This header file describes the Virtual Function (VF) - Physical Function * (PF) communication protocol used by the drivers for all devices starting @@ -114,6 +119,7 @@ enum virtchnl_ops { VIRTCHNL_OP_GET_STATS = 15, VIRTCHNL_OP_RSVD = 16, VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18, /* opcode 19 is reserved */ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP, @@ -240,6 +246,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6) /* used to negotiate communicating link speeds in Mbps */ #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7) +#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10) #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15) #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16) #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17) @@ -295,7 +302,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); /* VIRTCHNL_OP_CONFIG_RX_QUEUE * VF sends this message to set up parameters for one RX queue. * External data buffer contains one instance of virtchnl_rxq_info. - * PF configures requested queue and returns a status code. + * PF configures requested queue and returns a status code. The + * crc_disable flag disables CRC stripping on the VF. Setting + * the crc_disable flag to 1 will disable CRC stripping for each + * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC + * offload must have been set prior to sending this info or the PF + * will ignore the request. This flag should be set the same for + * all of the queues for a VF. */ /* Rx queue config info */ @@ -307,7 +320,7 @@ struct virtchnl_rxq_info { u16 splithdr_enabled; /* deprecated with AVF 1.0 */ u32 databuffer_size; u32 max_pkt_size; - u8 pad0; + u8 crc_disable; u8 rxdid; u8 pad1[2]; u64 dma_ring_addr; @@ -900,6 +913,29 @@ struct virtchnl_rss_hena { VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); +/* Type of RSS algorithm */ +enum virtchnl_rss_algorithm { + VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0, + VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1, + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2, + VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3, +}; + +/* VIRTCHNL_OP_CONFIG_RSS_HFUNC + * VF sends this message to configure the RSS hash function. Only supported + * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. 
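The raw_sync_try_cmpxchg() fallback added in atomic-arch-fallback.h above builds try semantics on top of sync_cmpxchg(): on failure the observed value is written back through the old pointer, so callers get the usual try_cmpxchg() loop shape without an extra re-read. A sketch of that calling pattern (set_flag_once() is illustrative):

static bool set_flag_once(u32 *word, u32 flag)
{
        u32 old = READ_ONCE(*word);

        do {
                if (old & flag)
                        return false;   /* someone else set it first */
                /* on failure, 'old' is refreshed with the current value */
        } while (!sync_try_cmpxchg(word, &old, old | flag));

        return true;
}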
+ * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC + * by the PF. + */ +struct virtchnl_rss_hfunc { + u16 vsi_id; + u16 rss_algorithm; /* enum virtchnl_rss_algorithm */ + u32 reserved; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc); + /* VIRTCHNL_OP_ENABLE_CHANNELS * VIRTCHNL_OP_DISABLE_CHANNELS * VF sends these messages to enable or disable channels based on @@ -1084,14 +1120,6 @@ enum virtchnl_vfr_states { VIRTCHNL_VFR_VFACTIVE, }; -/* Type of RSS algorithm */ -enum virtchnl_rss_algorithm { - VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0, - VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1, - VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2, - VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3, -}; - #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32 #define PROTO_HDR_SHIFT 5 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT) @@ -1531,6 +1559,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, vrl->lut_entries); } break; + case VIRTCHNL_OP_CONFIG_RSS_HFUNC: + valid_len = sizeof(struct virtchnl_rss_hfunc); + break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: break; case VIRTCHNL_OP_SET_RSS_HENA: diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index ae12696ec492..2ad261082bba 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -141,8 +141,6 @@ struct bdi_writeback { struct delayed_work dwork; /* work item used for writeback */ struct delayed_work bw_dwork; /* work item used for bandwidth estimate */ - unsigned long dirty_sleep; /* last wait */ - struct list_head bdi_node; /* anchored at bdi->wb_list */ #ifdef CONFIG_CGROUP_WRITEBACK @@ -179,6 +177,11 @@ struct backing_dev_info { * any dirty wbs, which is depended upon by bdi_has_dirty(). */ atomic_long_t tot_write_bandwidth; + /* + * Jiffies when last process was dirty throttled on this bdi. Used by + * blk-wbt. + */ + unsigned long last_bdp_sleep; struct bdi_writeback wb; /* the root writeback info for this bdi */ struct list_head wb_list; /* list of all wbs */ diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h new file mode 100644 index 000000000000..3f1fe1774f1b --- /dev/null +++ b/include/linux/backing-file.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Common helpers for stackable filesystems and backing files. + * + * Copyright (C) 2023 CTERA Networks. 
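Configuring the new VIRTCHNL_OP_CONFIG_RSS_HFUNC opcode above is a fixed 8-byte message, matching the valid_len check added to virtchnl_vc_validate_vf_msg() further down. A VF-side sketch; my_vf_send_msg() is a placeholder for the driver's mailbox path, not a real API:

static int my_vf_config_rss_hfunc(u16 vsi_id)
{
        struct virtchnl_rss_hfunc cfg = {
                .vsi_id         = vsi_id,
                .rss_algorithm  = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC,
        };

        return my_vf_send_msg(VIRTCHNL_OP_CONFIG_RSS_HFUNC, &cfg, sizeof(cfg));
}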
+ */ + +#ifndef _LINUX_BACKING_FILE_H +#define _LINUX_BACKING_FILE_H + +#include <linux/file.h> +#include <linux/uio.h> +#include <linux/fs.h> + +struct backing_file_ctx { + const struct cred *cred; + struct file *user_file; + void (*accessed)(struct file *); + void (*end_write)(struct file *); +}; + +struct file *backing_file_open(const struct path *user_path, int flags, + const struct path *real_path, + const struct cred *cred); +ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter, + struct kiocb *iocb, int flags, + struct backing_file_ctx *ctx); +ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter, + struct kiocb *iocb, int flags, + struct backing_file_ctx *ctx); +ssize_t backing_file_splice_read(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags, + struct backing_file_ctx *ctx); +ssize_t backing_file_splice_write(struct pipe_inode_info *pipe, + struct file *out, loff_t *ppos, size_t len, + unsigned int flags, + struct backing_file_ctx *ctx); +int backing_file_mmap(struct file *file, struct vm_area_struct *vma, + struct backing_file_ctx *ctx); + +#endif /* _LINUX_BACKING_FILE_H */ diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h index 2426276b9bd3..670f2dae692f 100644 --- a/include/linux/badblocks.h +++ b/include/linux/badblocks.h @@ -15,6 +15,7 @@ #define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9) #define BB_LEN(x) (((x) & BB_LEN_MASK) + 1) #define BB_ACK(x) (!!((x) & BB_ACK_MASK)) +#define BB_END(x) (BB_OFFSET(x) + BB_LEN(x)) #define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63)) /* Bad block numbers are stored sorted in a single page. @@ -41,6 +42,12 @@ struct badblocks { sector_t size; /* in sectors */ }; +struct badblocks_context { + sector_t start; + sector_t len; + int ack; +}; + int badblocks_check(struct badblocks *bb, sector_t s, int sectors, sector_t *first_bad, int *bad_sectors); int badblocks_set(struct badblocks *bb, sector_t s, int sectors, @@ -63,4 +70,27 @@ static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb) } badblocks_exit(bb); } + +static inline int badblocks_full(struct badblocks *bb) +{ + return (bb->count >= MAX_BADBLOCKS); +} + +static inline int badblocks_empty(struct badblocks *bb) +{ + return (bb->count == 0); +} + +static inline void set_changed(struct badblocks *bb) +{ + if (bb->changed != 1) + bb->changed = 1; +} + +static inline void clear_changed(struct badblocks *bb) +{ + if (bb->changed != 0) + bb->changed = 0; +} + #endif diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8d51f69f9f5e..70f97f685bff 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -90,6 +90,16 @@ struct linux_binfmt { #endif } __randomize_layout; +#if IS_ENABLED(CONFIG_BINFMT_MISC) +struct binfmt_misc { + struct list_head entries; + rwlock_t entries_lock; + bool enabled; +} __randomize_layout; + +extern struct binfmt_misc init_binfmt_misc; +#endif + extern void __register_binfmt(struct linux_binfmt *fmt, int insert); /* Registration of default binfmt handlers */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 41d417ee1349..875d792bffff 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -286,6 +286,11 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio, { struct bio_vec *bvec = bio_first_bvec_all(bio) + i; + if (unlikely(i >= bio->bi_vcnt)) { + fi->folio = NULL; + return; + } + fi->folio = page_folio(bvec->bv_page); fi->offset = bvec->bv_offset + 
PAGE_SIZE * (bvec->bv_page - &fi->folio->page); @@ -303,10 +308,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio) fi->offset = 0; fi->length = min(folio_size(fi->folio), fi->_seg_count); fi->_next = folio_next(fi->folio); - } else if (fi->_i + 1 < bio->bi_vcnt) { - bio_first_folio(fi, bio, fi->_i + 1); } else { - fi->folio = NULL; + bio_first_folio(fi, bio, fi->_i + 1); } } @@ -324,6 +327,8 @@ enum bip_flags { BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ + BIP_INTEGRITY_USER = 1 << 5, /* Integrity payload is user address */ + BIP_COPY_USER = 1 << 6, /* Kernel bounce buffer in use */ }; /* @@ -718,6 +723,7 @@ static inline bool bioset_initialized(struct bio_set *bs) for_each_bio(_bio) \ bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) +int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed); extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); extern bool bio_integrity_prep(struct bio *); @@ -789,6 +795,12 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page, return 0; } +static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, + ssize_t len, u32 seed) +{ + return -EINVAL; +} + #endif /* CONFIG_BLK_DEV_INTEGRITY */ /* diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h new file mode 100644 index 000000000000..17caeca94cab --- /dev/null +++ b/include/linux/bitmap-str.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BITMAP_STR_H +#define __LINUX_BITMAP_STR_H + +int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); +int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); +extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, + int nmaskbits, loff_t off, size_t count); +extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, + int nmaskbits, loff_t off, size_t count); +int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits); +int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); +int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); + +#endif /* __LINUX_BITMAP_STR_H */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 03644237e1ef..99451431e4d6 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -6,10 +6,12 @@ #include <linux/align.h> #include <linux/bitops.h> +#include <linux/errno.h> #include <linux/find.h> #include <linux/limits.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/bitmap-str.h> struct device; @@ -200,14 +202,6 @@ bitmap_find_next_zero_area(unsigned long *map, align_mask, 0); } -int bitmap_parse(const char *buf, unsigned int buflen, - unsigned long *dst, int nbits); -int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, - unsigned long *dst, int nbits); -int bitmap_parselist(const char *buf, unsigned long *maskp, - int nmaskbits); -int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, - unsigned long *dst, int nbits); void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits); int 
bitmap_bitremap(int oldbit, @@ -216,23 +210,6 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits); void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits); -int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); -void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); -int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); - -#ifdef __BIG_ENDIAN -void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); -#else -#define bitmap_copy_le bitmap_copy -#endif -int bitmap_print_to_pagebuf(bool list, char *buf, - const unsigned long *maskp, int nmaskbits); - -extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, - int nmaskbits, loff_t off, size_t count); - -extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, - int nmaskbits, loff_t off, size_t count); #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) @@ -519,6 +496,66 @@ static inline void bitmap_next_set_region(unsigned long *bitmap, } /** + * bitmap_release_region - release allocated bitmap region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @pos: beginning of bit region to release + * @order: region size (log base 2 of number of bits) to release + * + * This is the complement to bitmap_find_free_region() and releases + * the found region (by clearing it in the bitmap). + */ +static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order) +{ + bitmap_clear(bitmap, pos, BIT(order)); +} + +/** + * bitmap_allocate_region - allocate bitmap region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @pos: beginning of bit region to allocate + * @order: region size (log base 2 of number of bits) to allocate + * + * Allocate (set bits in) a specified region of a bitmap. + * + * Returns: 0 on success, or %-EBUSY if specified region wasn't + * free (not all bits were zero). + */ +static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) +{ + unsigned int len = BIT(order); + + if (find_next_bit(bitmap, pos + len, pos) < pos + len) + return -EBUSY; + bitmap_set(bitmap, pos, len); + return 0; +} + +/** + * bitmap_find_free_region - find a contiguous aligned mem region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @bits: number of bits in the bitmap + * @order: region size (log base 2 of number of bits) to find + * + * Find a region of free (zero) bits in a @bitmap of @bits bits and + * allocate them (set them to one). Only consider regions of length + * a power (@order) of two, aligned to that power of two, which + * makes the search algorithm much faster. + * + * Returns: the bit offset in bitmap of the allocated region, + * or -errno on failure. + */ +static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order) +{ + unsigned int pos, end; /* scans bitmap by regions of size order */ + + for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) { + if (!bitmap_allocate_region(bitmap, pos, order)) + return pos; + } + return -ENOMEM; +} + +/** + * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
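For orientation, a minimal usage sketch of the three region helpers above; it is not part of the patch, the 64-bit pool, the order-2 request and both function names are invented, and locking is assumed to be the caller's responsibility:

/* Hypothetical ID pool: 64 IDs handed out in aligned blocks of 4 (order 2). */
static DECLARE_BITMAP(id_pool, 64);

static int claim_id_block(void)
{
	/* Sets an aligned run of BIT(2) zero bits and returns its first bit,
	 * or -ENOMEM when no such run is free. */
	return bitmap_find_free_region(id_pool, 64, 2);
}

static void release_id_block(int pos)
{
	/* Clears the BIT(2) bits claimed above. */
	bitmap_release_region(id_pool, pos, 2);
}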
* @n: u64 value * diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 958ed7e89b30..7a8150a5f051 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -32,8 +32,6 @@ typedef __u32 __bitwise req_flags_t; #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) /* merge of different types, fail separately */ #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) -/* track inflight for MQ */ -#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) /* don't call prep for this one */ #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) /* use hctx->sched_tags */ @@ -393,9 +391,6 @@ struct blk_mq_hw_ctx { */ struct blk_mq_tags *sched_tags; - /** @run: Number of dispatched requests. */ - unsigned long run; - /** @numa_node: NUMA node the storage adapter has been connected to. */ unsigned int numa_node; /** @queue_num: Index of this hardware queue. */ @@ -832,6 +827,12 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib); */ static inline bool blk_mq_need_time_stamp(struct request *rq) { + /* + * passthrough io doesn't use iostat accounting, cgroup stats + * and io scheduler functionalities. + */ + if (blk_rq_is_passthrough(rq)) + return false; return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED)); } diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h index 2580e05a8ab6..004b38a538ff 100644 --- a/include/linux/blk-pm.h +++ b/include/linux/blk-pm.h @@ -15,7 +15,6 @@ extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); extern void blk_pre_runtime_resume(struct request_queue *q); extern void blk_post_runtime_resume(struct request_queue *q); -extern void blk_set_runtime_active(struct request_queue *q); #else static inline void blk_pm_runtime_init(struct request_queue *q, struct device *dev) {} diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index d5c5e59ddbd2..f288c94374b3 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -49,26 +49,26 @@ struct block_device { bool bd_write_holder; bool bd_has_submit_bio; dev_t bd_dev; + struct inode *bd_inode; /* will die */ + atomic_t bd_openers; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ - struct inode * bd_inode; /* will die */ void * bd_claiming; void * bd_holder; const struct blk_holder_ops *bd_holder_ops; struct mutex bd_holder_lock; - /* The counter of freeze processes */ - int bd_fsfreeze_count; int bd_holders; struct kobject *bd_holder_dir; - /* Mutex for freeze */ - struct mutex bd_fsfreeze_mutex; - struct super_block *bd_fsfreeze_sb; + atomic_t bd_fsfreeze_count; /* number of freeze requests */ + struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */ struct partition_meta_info *bd_meta_info; #ifdef CONFIG_FAIL_MAKE_REQUEST bool bd_make_it_fail; #endif + bool bd_ro_warned; + int bd_writers; /* * keep this out-of-line as it's both big and not needed in the fast * path @@ -378,6 +378,8 @@ enum req_op { REQ_OP_DISCARD = (__force blk_opf_t)3, /* securely erase sectors */ REQ_OP_SECURE_ERASE = (__force blk_opf_t)5, + /* write data at the current zone write pointer */ + REQ_OP_ZONE_APPEND = (__force blk_opf_t)7, /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9, /* Open a zone */ @@ -386,12 +388,10 @@ enum req_op { REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11, /* Transition a zone to full */ REQ_OP_ZONE_FINISH = (__force blk_opf_t)12, - /* write data at the current zone write pointer */ - REQ_OP_ZONE_APPEND = (__force blk_opf_t)13, 
/* reset a zone write pointer */ - REQ_OP_ZONE_RESET = (__force blk_opf_t)15, + REQ_OP_ZONE_RESET = (__force blk_opf_t)13, /* reset all the zones present on the device */ - REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17, + REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)15, /* Driver private requests */ REQ_OP_DRV_IN = (__force blk_opf_t)34, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index eef450f25982..99e4f5e72213 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -124,6 +124,8 @@ typedef unsigned int __bitwise blk_mode_t; #define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3)) /* open for "writes" only for ioctls (special hack for floppy.c) */ #define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4)) +/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */ +#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5)) struct gendisk { /* @@ -264,18 +266,6 @@ static inline bool blk_op_is_passthrough(blk_opf_t op) } /* - * Zoned block device models (zoned limit). - * - * Note: This needs to be ordered from the least to the most severe - * restrictions for the inheritance in blk_stack_limits() to work. - */ -enum blk_zoned_model { - BLK_ZONED_NONE = 0, /* Regular block device */ - BLK_ZONED_HA, /* Host-aware zoned block device */ - BLK_ZONED_HM, /* Host-managed zoned block device */ -}; - -/* * BLK_BOUNCE_NONE: never bounce (default) * BLK_BOUNCE_HIGH: bounce all highmem pages */ @@ -316,7 +306,7 @@ struct queue_limits { unsigned char misaligned; unsigned char discard_misaligned; unsigned char raid_partial_stripes_expensive; - enum blk_zoned_model zoned; + bool zoned; /* * Drivers that set dma_alignment to less than 511 must be prepared to @@ -329,24 +319,15 @@ struct queue_limits { typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, void *data); -void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model); +void disk_set_zoned(struct gendisk *disk); -#ifdef CONFIG_BLK_DEV_ZONED #define BLK_ALL_ZONES ((unsigned int)-1) int blkdev_report_zones(struct block_device *bdev, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data); -unsigned int bdev_nr_zones(struct block_device *bdev); -extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, - sector_t sectors, sector_t nr_sectors, - gfp_t gfp_mask); + unsigned int nr_zones, report_zones_cb cb, void *data); +int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, + sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); int blk_revalidate_disk_zones(struct gendisk *disk, - void (*update_driver_data)(struct gendisk *disk)); -#else /* CONFIG_BLK_DEV_ZONED */ -static inline unsigned int bdev_nr_zones(struct block_device *bdev) -{ - return 0; -} -#endif /* CONFIG_BLK_DEV_ZONED */ + void (*update_driver_data)(struct gendisk *disk)); /* * Independent access ranges: struct blk_independent_access_range describes @@ -376,59 +357,51 @@ struct blk_independent_access_ranges { }; struct request_queue { - struct request *last_merge; - struct elevator_queue *elevator; - - struct percpu_ref q_usage_counter; + /* + * The queue owner gets to use this for whatever they like. + * ll_rw_blk doesn't touch it.
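The queuedata escape hatch described in the comment above is easiest to see in a sketch. Nothing here is from the patch; the mydev driver and both functions are invented:

struct mydev {
	struct request_queue *queue;
	/* driver state ... */
};

static void mydev_attach_queue(struct mydev *md)
{
	/* The block core stores this pointer but never interprets it. */
	md->queue->queuedata = md;
}

static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	/* Recover the owning device from the hardware context's queue. */
	struct mydev *md = hctx->queue->queuedata;

	return md ? BLK_STS_OK : BLK_STS_IOERR;
}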
+ */ + void *queuedata; - struct blk_queue_stats *stats; - struct rq_qos *rq_qos; - struct mutex rq_qos_mutex; + struct elevator_queue *elevator; const struct blk_mq_ops *mq_ops; /* sw queues */ struct blk_mq_ctx __percpu *queue_ctx; + /* + * various queue flags, see QUEUE_* below + */ + unsigned long queue_flags; + + unsigned int rq_timeout; + unsigned int queue_depth; + refcount_t refs; + /* hw dispatch queues */ - struct xarray hctx_table; unsigned int nr_hw_queues; + struct xarray hctx_table; - /* - * The queue owner gets to use this for whatever they like. - * ll_rw_blk doesn't touch it. - */ - void *queuedata; - - /* - * various queue flags, see QUEUE_* below - */ - unsigned long queue_flags; - /* - * Number of contexts that have called blk_set_pm_only(). If this - * counter is above zero then only RQF_PM requests are processed. - */ - atomic_t pm_only; + struct percpu_ref q_usage_counter; - /* - * ida allocated id for this queue. Used to index queues from - * ioctx. - */ - int id; + struct request *last_merge; spinlock_t queue_lock; - struct gendisk *disk; + int quiesce_depth; - refcount_t refs; + struct gendisk *disk; /* * mq queue kobject */ struct kobject *mq_kobj; + struct queue_limits limits; + #ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity integrity; #endif /* CONFIG_BLK_DEV_INTEGRITY */ @@ -439,24 +412,40 @@ struct request_queue { #endif /* - * queue settings + * Number of contexts that have called blk_set_pm_only(). If this + * counter is above zero then only RQF_PM requests are processed. */ - unsigned long nr_requests; /* Max # of requests */ + atomic_t pm_only; + + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + struct mutex rq_qos_mutex; + + /* + * ida allocated id for this queue. Used to index queues from + * ioctx. 
+ */ + int id; unsigned int dma_pad_mask; + /* + * queue settings + */ + unsigned long nr_requests; /* Max # of requests */ + #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct blk_crypto_profile *crypto_profile; struct kobject *crypto_kobject; #endif - unsigned int rq_timeout; - struct timer_list timeout; struct work_struct timeout_work; atomic_t nr_active_requests_shared_tags; + unsigned int required_elevator_features; + struct blk_mq_tags *sched_shared_tags; struct list_head icq_list; @@ -467,11 +456,12 @@ struct request_queue { struct mutex blkcg_mutex; #endif - struct queue_limits limits; + int node; - unsigned int required_elevator_features; + spinlock_t requeue_lock; + struct list_head requeue_list; + struct delayed_work requeue_work; - int node; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace __rcu *blk_trace; #endif @@ -481,10 +471,6 @@ struct request_queue { struct blk_flush_queue *fq; struct list_head flush_list; - struct list_head requeue_list; - spinlock_t requeue_lock; - struct delayed_work requeue_work; - struct mutex sysfs_lock; struct mutex sysfs_dir_lock; @@ -509,8 +495,6 @@ struct request_queue { */ struct mutex mq_freeze_lock; - int quiesce_depth; - struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; @@ -538,7 +522,7 @@ struct request_queue { #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ #define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ -#define QUEUE_FLAG_HW_WC 18 /* Write back caching supported */ +#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ #define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */ #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ @@ -623,26 +607,14 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q) } #endif -static inline enum blk_zoned_model -blk_queue_zoned_model(struct request_queue *q) -{ - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) - return q->limits.zoned; - return BLK_ZONED_NONE; -} - static inline bool blk_queue_is_zoned(struct request_queue *q) { - switch (blk_queue_zoned_model(q)) { - case BLK_ZONED_HA: - case BLK_ZONED_HM: - return true; - default: - return false; - } + return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned; } #ifdef CONFIG_BLK_DEV_ZONED +unsigned int bdev_nr_zones(struct block_device *bdev); + static inline unsigned int disk_nr_zones(struct gendisk *disk) { return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0; @@ -687,6 +659,11 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev) } #else /* CONFIG_BLK_DEV_ZONED */ +static inline unsigned int bdev_nr_zones(struct block_device *bdev) +{ + return 0; +} + static inline unsigned int disk_nr_zones(struct gendisk *disk) { return 0; @@ -1080,7 +1057,14 @@ enum blk_default_limits { BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, }; -#define BLK_DEF_MAX_SECTORS 2560u +/* + * Default upper limit for the software max_sectors limit used for + * regular file system I/O. This can be increased through sysfs. + * + * Not to be confused with the max_hw_sector limit that is entirely + * controlled by the driver, usually based on hardware limits. 
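With the three-state model gone, zoned-ness is the plain yes/no question blk_queue_is_zoned() above now answers. A hypothetical consumer, using only helpers declared in this hunk (the function itself is invented):

/* Size a per-zone bitmap for a block device, one bit per zone. */
static unsigned long *alloc_zone_bitmap(struct block_device *bdev)
{
	if (!bdev_is_zoned(bdev))
		return NULL;	/* regular device: nothing to track */

	/* bdev_nr_zones() is stubbed to return 0 when CONFIG_BLK_DEV_ZONED=n */
	return bitmap_zalloc(bdev_nr_zones(bdev), GFP_KERNEL);
}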
+ */ +#define BLK_DEF_MAX_SECTORS_CAP 2560u static inline unsigned long queue_segment_boundary(const struct request_queue *q) { @@ -1259,11 +1243,6 @@ static inline bool bdev_nowait(struct block_device *bdev) return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags); } -static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) -{ - return blk_queue_zoned_model(bdev_get_queue(bdev)); -} - static inline bool bdev_is_zoned(struct block_device *bdev) { return blk_queue_is_zoned(bdev_get_queue(bdev)); @@ -1468,8 +1447,23 @@ struct blk_holder_ops { * Sync the file system mounted on the block device. */ void (*sync)(struct block_device *bdev); + + /* + * Freeze the file system mounted on the block device. + */ + int (*freeze)(struct block_device *bdev); + + /* + * Thaw the file system mounted on the block device. + */ + int (*thaw)(struct block_device *bdev); }; +/* + * For filesystems using @fs_holder_ops, the @holder argument passed to + * helpers used to open and claim block devices via + * bd_prepare_to_claim() must point to a superblock. + */ extern const struct blk_holder_ops fs_holder_ops; /* @@ -1477,16 +1471,23 @@ extern const struct blk_holder_ops fs_holder_ops; * as stored in sb->s_flags. */ #define sb_open_mode(flags) \ - (BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE)) + (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \ + (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE)) + +struct bdev_handle { + struct block_device *bdev; + void *holder; + blk_mode_t mode; +}; -struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder, +struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops); -struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode, +struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops); int bd_prepare_to_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops); void bd_abort_claiming(struct block_device *bdev, void *holder); -void blkdev_put(struct block_device *bdev, void *holder); +void bdev_release(struct bdev_handle *handle); /* just for blk-cgroup, don't use elsewhere */ struct block_device *blkdev_get_no_open(dev_t dev); @@ -1530,8 +1531,8 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev) } #endif /* CONFIG_BLOCK */ -int freeze_bdev(struct block_device *bdev); -int thaw_bdev(struct block_device *bdev); +int bdev_freeze(struct block_device *bdev); +int bdev_thaw(struct block_device *bdev); struct io_comp_batch { struct request *req_list; diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h index e1a3c9c9754c..cffa38a73618 100644 --- a/include/linux/bootmem_info.h +++ b/include/linux/bootmem_info.h @@ -60,7 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page, static inline void free_bootmem_page(struct page *page) { - kmemleak_free_part(page_to_virt(page), PAGE_SIZE); + kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE); free_reserved_page(page); } #endif diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h index 7b121bd780eb..0985221d5478 100644 --- a/include/linux/bpf-cgroup-defs.h +++ b/include/linux/bpf-cgroup-defs.h @@ -28,19 +28,24 @@ enum cgroup_bpf_attach_type { CGROUP_INET6_BIND, CGROUP_INET4_CONNECT, CGROUP_INET6_CONNECT, + CGROUP_UNIX_CONNECT, CGROUP_INET4_POST_BIND, CGROUP_INET6_POST_BIND, CGROUP_UDP4_SENDMSG, 
CGROUP_UDP6_SENDMSG, + CGROUP_UNIX_SENDMSG, CGROUP_SYSCTL, CGROUP_UDP4_RECVMSG, CGROUP_UDP6_RECVMSG, + CGROUP_UNIX_RECVMSG, CGROUP_GETSOCKOPT, CGROUP_SETSOCKOPT, CGROUP_INET4_GETPEERNAME, CGROUP_INET6_GETPEERNAME, + CGROUP_UNIX_GETPEERNAME, CGROUP_INET4_GETSOCKNAME, CGROUP_INET6_GETSOCKNAME, + CGROUP_UNIX_GETSOCKNAME, CGROUP_INET_SOCK_RELEASE, CGROUP_LSM_START, CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1, diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 8506690dbb9c..a789266feac3 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -48,19 +48,24 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type) CGROUP_ATYPE(CGROUP_INET6_BIND); CGROUP_ATYPE(CGROUP_INET4_CONNECT); CGROUP_ATYPE(CGROUP_INET6_CONNECT); + CGROUP_ATYPE(CGROUP_UNIX_CONNECT); CGROUP_ATYPE(CGROUP_INET4_POST_BIND); CGROUP_ATYPE(CGROUP_INET6_POST_BIND); CGROUP_ATYPE(CGROUP_UDP4_SENDMSG); CGROUP_ATYPE(CGROUP_UDP6_SENDMSG); + CGROUP_ATYPE(CGROUP_UNIX_SENDMSG); CGROUP_ATYPE(CGROUP_SYSCTL); CGROUP_ATYPE(CGROUP_UDP4_RECVMSG); CGROUP_ATYPE(CGROUP_UDP6_RECVMSG); + CGROUP_ATYPE(CGROUP_UNIX_RECVMSG); CGROUP_ATYPE(CGROUP_GETSOCKOPT); CGROUP_ATYPE(CGROUP_SETSOCKOPT); CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME); CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME); + CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME); CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME); CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME); + CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME); CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE); default: return CGROUP_BPF_ATTACH_TYPE_INVALID; @@ -120,6 +125,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk, int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, struct sockaddr *uaddr, + int *uaddrlen, enum cgroup_bpf_attach_type atype, void *t_ctx, u32 *flags); @@ -137,11 +143,12 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, - int *optname, char __user *optval, + int *optname, sockptr_t optval, int *optlen, char **kernel_optval); + int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, - int optname, char __user *optval, - int __user *optlen, int max_optlen, + int optname, sockptr_t optval, + sockptr_t optlen, int max_optlen, int retval); int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, @@ -230,22 +237,22 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND) -#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \ +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled(atype)) \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ - NULL, NULL); \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \ + atype, NULL, NULL); \ __ret; \ }) -#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \ +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled(atype)) { \ lock_sock(sk); \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ - t_ctx, NULL); \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \ + atype, t_ctx, NULL); \ release_sock(sk); \ } \ __ret; \ @@ -256,14 +263,14 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE). 
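Each sock_addr runner in this hunk grows a uaddrlen parameter because the new AF_UNIX hooks handle variable-length addresses that a program may rewrite. A hedged sketch of a call site; the wrapper is invented and simply mirrors what BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK below expands to:

static int run_unix_connect_hook(struct sock *sk, struct sockaddr *uaddr,
				 int *uaddrlen)
{
	/* The program may rewrite both the address bytes and *uaddrlen,
	 * which is why the length now travels by pointer. */
	return BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen,
					   CGROUP_UNIX_CONNECT, NULL);
}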
*/ -#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \ +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \ ({ \ u32 __flags = 0; \ int __ret = 0; \ if (cgroup_bpf_enabled(atype)) { \ lock_sock(sk); \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ - NULL, &__flags); \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \ + atype, NULL, &__flags); \ release_sock(sk); \ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \ @@ -276,29 +283,38 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \ (sk)->sk_prot->pre_connect) -#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL) + +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL) -#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT) +#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL) -#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL) +#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx) -#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL) +#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx) -#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx) +#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx) -#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx) +#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL) -#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL) +#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL) -#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL) +#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL) /* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a * fullsock and its parent fullsock cannot be traced by @@ -377,7 +393,7 @@ static inline bool 
cgroup_bpf_sock_enabled(struct sock *sk, ({ \ int __ret = 0; \ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ - get_user(__ret, optlen); \ + copy_from_sockptr(&__ret, optlen, sizeof(int)); \ __ret; \ }) @@ -477,24 +493,27 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, } #define cgroup_bpf_enabled(atype) (0) -#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; }) -#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; }) +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 12596af59c00..e30100597d0a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -29,6 +29,7 @@ #include <linux/rcupdate_trace.h> #include <linux/static_call.h> #include <linux/memcontrol.h> +#include <linux/cfi.h> struct bpf_verifier_env; struct bpf_verifier_log; @@ -55,7 +56,7 @@ struct cgroup; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; extern struct kobject *btf_kobj; -extern struct bpf_mem_alloc bpf_global_ma; +extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma; extern bool bpf_global_ma_set; typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); @@ -106,7 +107,11 @@ struct bpf_map_ops { /* funcs called by prog_array and perf_event_array map */ void 
*(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, int fd); - void (*map_fd_put_ptr)(void *ptr); + /* If need_defer is true, the implementation should guarantee that + * the to-be-put element is still alive before the bpf program, which + * may manipulate it, exits. + */ + void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer); int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); u32 (*map_fd_sys_lookup_elem)(void *ptr); void (*map_seq_show_elem)(struct bpf_map *map, void *key, @@ -180,14 +185,15 @@ enum btf_field_type { BPF_TIMER = (1 << 1), BPF_KPTR_UNREF = (1 << 2), BPF_KPTR_REF = (1 << 3), - BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF, - BPF_LIST_HEAD = (1 << 4), - BPF_LIST_NODE = (1 << 5), - BPF_RB_ROOT = (1 << 6), - BPF_RB_NODE = (1 << 7), - BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD | - BPF_RB_NODE | BPF_RB_ROOT, - BPF_REFCOUNT = (1 << 8), + BPF_KPTR_PERCPU = (1 << 4), + BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU, + BPF_LIST_HEAD = (1 << 5), + BPF_LIST_NODE = (1 << 6), + BPF_RB_ROOT = (1 << 7), + BPF_RB_NODE = (1 << 8), + BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE, + BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD, + BPF_REFCOUNT = (1 << 9), }; typedef void (*btf_dtor_kfunc_t)(void *); @@ -271,7 +277,11 @@ struct bpf_map { */ atomic64_t refcnt ____cacheline_aligned; atomic64_t usercnt; - struct work_struct work; + /* rcu is used before freeing and work is only used during freeing */ + union { + struct work_struct work; + struct rcu_head rcu; + }; struct mutex freeze_mutex; atomic64_t writecnt; /* 'Ownership' of program-containing map is claimed by the first program @@ -287,6 +297,9 @@ struct bpf_map { } owner; bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ + bool free_after_mult_rcu_gp; + bool free_after_rcu_gp; + atomic64_t sleepable_refcnt; s64 __percpu *elem_count; }; @@ -300,6 +313,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type) case BPF_KPTR_UNREF: case BPF_KPTR_REF: return "kptr"; + case BPF_KPTR_PERCPU: + return "percpu_kptr"; case BPF_LIST_HEAD: return "bpf_list_head"; case BPF_LIST_NODE: @@ -325,6 +340,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type) return sizeof(struct bpf_timer); case BPF_KPTR_UNREF: case BPF_KPTR_REF: + case BPF_KPTR_PERCPU: return sizeof(u64); case BPF_LIST_HEAD: return sizeof(struct bpf_list_head); @@ -351,6 +367,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type) return __alignof__(struct bpf_timer); case BPF_KPTR_UNREF: case BPF_KPTR_REF: + case BPF_KPTR_PERCPU: return __alignof__(u64); case BPF_LIST_HEAD: return __alignof__(struct bpf_list_head); @@ -389,6 +406,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) case BPF_TIMER: case BPF_KPTR_UNREF: case BPF_KPTR_REF: + case BPF_KPTR_PERCPU: break; default: WARN_ON_ONCE(1); @@ -438,7 +456,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) size /= sizeof(long); while (size--) - *ldst++ = *lsrc++; + data_race(*ldst++ = *lsrc++); } /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each.
*/ @@ -903,10 +921,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) aux->ctx_field_size = size; } +static inline bool bpf_is_ldimm64(const struct bpf_insn *insn) +{ + return insn->code == (BPF_LD | BPF_IMM | BPF_DW); +} + static inline bool bpf_pseudo_func(const struct bpf_insn *insn) { - return insn->code == (BPF_LD | BPF_IMM | BPF_DW) && - insn->src_reg == BPF_PSEUDO_FUNC; + return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC; } struct bpf_prog_ops { @@ -1029,6 +1051,22 @@ struct btf_func_model { */ #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6) +/* Indicate that current trampoline is in a tail call context. Then, it has to + * cache and restore tail_call_cnt to avoid infinite tail call loop. + */ +#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7) + +/* + * Indicate the trampoline should be suitable to receive indirect calls; + * without this indirectly calling the generated code can result in #UD/#CP, + * depending on the CFI options. + * + * Used by bpf_struct_ops. + * + * Incompatible with FENTRY usage, overloads @func_addr argument. + */ +#define BPF_TRAMP_F_INDIRECT BIT(8) + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. */ @@ -1068,10 +1106,17 @@ struct bpf_tramp_run_ctx; * fexit = a set of program to run after original function */ struct bpf_tramp_image; -int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end, +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, - void *orig_call); + void *func_addr); +void *arch_alloc_bpf_trampoline(unsigned int size); +void arch_free_bpf_trampoline(void *image, unsigned int size); +void arch_protect_bpf_trampoline(void *image, unsigned int size); +void arch_unprotect_bpf_trampoline(void *image, unsigned int size); +int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, void *func_addr); + u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx); void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start, @@ -1104,6 +1149,7 @@ enum bpf_tramp_prog_type { struct bpf_tramp_image { void *image; + int size; struct bpf_ksym ksym; struct percpu_ref pcref; void *ip_after_call; @@ -1173,7 +1219,11 @@ struct bpf_dispatcher { #endif }; -static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( +#ifndef __bpfcall +#define __bpfcall __nocfi +#endif + +static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func( const void *ctx, const struct bpf_insn *insnsi, bpf_func_t bpf_func) @@ -1211,6 +1261,8 @@ enum bpf_dynptr_type { int bpf_dynptr_check_size(u32 size); u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); +const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); +void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); #ifdef CONFIG_BPF_JIT int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); @@ -1263,7 +1315,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func #define DEFINE_BPF_DISPATCHER(name) \ __BPF_DISPATCHER_SC(name); \ - noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ + noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ bpf_func_t bpf_func) \ @@ -1286,7 +1338,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64
*funcs, int num_func void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to); /* Called only from JIT-enabled code, so there's no need for stubs. */ -void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); +void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym); @@ -1307,7 +1359,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info) { - return ERR_PTR(-EOPNOTSUPP); + return NULL; } static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} #define DEFINE_BPF_DISPATCHER(name) @@ -1330,6 +1382,8 @@ static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog) struct bpf_func_info_aux { u16 linkage; bool unreliable; + bool called : 1; + bool verified : 1; }; enum bpf_jit_poke_reason { @@ -1378,6 +1432,7 @@ struct bpf_prog_aux { u32 stack_depth; u32 id; u32 func_cnt; /* used by non-func prog as the number of func progs */ + u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ u32 attach_btf_id; /* in-kernel BTF type id to attach to */ u32 ctx_arg_info_size; @@ -1394,10 +1449,13 @@ struct bpf_prog_aux { bool dev_bound; /* Program is bound to the netdev. */ bool offload_requested; /* Program is bound and offloaded to the netdev. */ bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ + bool attach_tracing_prog; /* true if tracing another tracing program */ bool func_proto_unreliable; bool sleepable; bool tail_call_reachable; bool xdp_has_frags; + bool exception_cb; + bool exception_boundary; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; /* function name for valid attach_btf_id */ @@ -1408,6 +1466,9 @@ struct bpf_prog_aux { struct bpf_kfunc_desc_tab *kfunc_tab; struct bpf_kfunc_btf_tab *kfunc_btf_tab; u32 size_poke_tab; +#ifdef CONFIG_FINEIBT + struct bpf_ksym ksym_prefix; +#endif struct bpf_ksym ksym; const struct bpf_prog_ops *ops; struct bpf_map **used_maps; @@ -1420,6 +1481,7 @@ struct bpf_prog_aux { int cgroup_atype; /* enum cgroup_bpf_attach_type */ struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; char name[BPF_OBJ_NAME_LEN]; + u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64); #ifdef CONFIG_SECURITY void *security; #endif @@ -1617,6 +1679,7 @@ struct bpf_struct_ops { struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; u32 type_id; u32 value_id; + void *cfi_stubs; }; #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) @@ -1630,6 +1693,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, struct bpf_tramp_link *link, const struct btf_func_model *model, + void *stub_func, void *image, void *image_end); static inline bool bpf_try_module_get(const void *data, struct module *owner) { @@ -2043,6 +2107,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec); bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); void bpf_obj_free_timer(const struct btf_record *rec, void *obj); void bpf_obj_free_fields(const struct btf_record *rec, void *obj); +void __bpf_obj_drop_impl(void *p, const struct btf_record 
*rec, bool percpu); struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); @@ -2149,12 +2214,12 @@ static inline bool bpf_allow_uninit_stack(void) static inline bool bpf_bypass_spec_v1(void) { - return perfmon_capable(); + return cpu_mitigations_off() || perfmon_capable(); } static inline bool bpf_bypass_spec_v4(void) { - return perfmon_capable(); + return cpu_mitigations_off() || perfmon_capable(); } int bpf_map_new_fd(struct bpf_map *map, int flags); @@ -2402,14 +2467,11 @@ int btf_distill_func_proto(struct bpf_verifier_log *log, struct btf_func_model *m); struct bpf_reg_state; -int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, - struct bpf_reg_state *regs); -int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, - struct bpf_reg_state *regs); -int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, - struct bpf_reg_state *reg); +int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog); int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, struct btf *btf, const struct btf_type *t); +const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, + int comp_idx, const char *tag_key); struct bpf_prog *bpf_prog_by_id(u32 id); struct bpf_link *bpf_link_by_id(u32 id); @@ -2461,6 +2523,9 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, enum bpf_dynptr_type type, u32 offset, u32 size); void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); + +bool dev_check_flush(void); +bool cpu_map_check_flush(void); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -2905,6 +2970,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, #endif /* CONFIG_BPF_SYSCALL */ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ +static __always_inline void +bpf_prog_inc_misses_counters(const struct bpf_prog_array *array) +{ + const struct bpf_prog_array_item *item; + struct bpf_prog *prog; + + if (unlikely(!array)) + return; + + item = &array->items[0]; + while ((prog = READ_ONCE(item->prog))) { + bpf_prog_inc_misses_counter(prog); + item++; + } +} + #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) void bpf_sk_reuseport_detach(struct sock *sk); int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, @@ -3134,6 +3215,9 @@ enum bpf_text_poke_type { int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, void *addr1, void *addr2); +void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, + struct bpf_prog *new, struct bpf_prog *old); + void *bpf_arch_text_copy(void *dst, void *src, size_t len); int bpf_arch_text_invalidate(void *dst, size_t len); @@ -3183,4 +3267,9 @@ static inline gfp_t bpf_memcg_flags(gfp_t flags) return flags; } +static inline bool bpf_is_subprog(const struct bpf_prog *prog) +{ + return prog->aux->func_idx != 0; +} + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h index d644bbb298af..aaf004d94322 100644 --- a/include/linux/bpf_mem_alloc.h +++ b/include/linux/bpf_mem_alloc.h @@ -11,6 +11,8 @@ struct bpf_mem_caches; struct bpf_mem_alloc { struct bpf_mem_caches __percpu *caches; struct bpf_mem_cache __percpu *cache; + struct obj_cgroup *objcg; + bool percpu; struct work_struct work; }; @@ -20,8 +22,15 @@ struct bpf_mem_alloc { * 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects. 
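As a concrete reading of the size == 0 mode described in the surrounding comment, a hedged sketch follows; it is not from the patch and the demo names are invented:

static struct bpf_mem_alloc demo_ma;

static int demo_setup(void)
{
	/* size == 0: one allocator fronting many fixed-size caches. */
	return bpf_mem_alloc_init(&demo_ma, 0, false);
}

static void demo_roundtrip(void)
{
	/* With size == 0, the unit size is chosen per call instead. */
	void *obj = bpf_mem_alloc(&demo_ma, 64);

	if (obj)
		bpf_mem_free(&demo_ma, obj);
	bpf_mem_alloc_destroy(&demo_ma);
}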
* Alloc and free are done with bpf_mem_{alloc,free}() and the size of * the returned object is given by the size argument of bpf_mem_alloc(). + * If percpu is true, an error is returned in order to avoid + * large memory consumption, and the below bpf_mem_alloc_percpu_unit_init() + * should be used to do on-demand per-cpu allocation for each size. */ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu); +/* Initialize a non-fixed-size percpu memory allocator */ +int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg); +/* The percpu allocation with a specific unit size. */ +int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size); void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma); /* kmalloc/kfree equivalent: */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index fc0d6f32c687..94baced5a1ad 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -142,9 +142,13 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) #ifdef CONFIG_NET BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) +BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter) +BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx) +BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit) #endif #ifdef CONFIG_PERF_EVENTS BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi) BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops) +BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index b6e58dab8e27..d07d857ca67f 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -275,6 +275,11 @@ struct bpf_reference_state { int callback_ref; }; +struct bpf_retval_range { + s32 minval; + s32 maxval; +}; + /* state of the program: * type of all registers and stack info */ @@ -297,23 +302,67 @@ struct bpf_func_state { * void foo(void) { bpf_timer_set_callback(,foo); } */ u32 async_entry_cnt; + struct bpf_retval_range callback_ret_range; bool in_callback_fn; - struct tnum callback_ret_range; bool in_async_callback_fn; + bool in_exception_callback_fn; + /* For callback calling functions that limit number of possible + * callback executions (e.g. bpf_loop) keeps track of current + * simulated iteration number. + * Value in frame N refers to number of times callback with frame + * N+1 was simulated, e.g. for the following call: + * + * bpf_loop(..., fn, ...); | suppose current frame is N + * | fn would be simulated in frame N+1 + * | number of simulations is tracked in frame N + */ + u32 callback_depth; /* The following fields should be last. See copy_func_state() */ int acquired_refs; struct bpf_reference_state *refs; - int allocated_stack; + /* The state of the stack. Each element of the array describes BPF_REG_SIZE + * (i.e. 8) bytes worth of stack memory. + * stack[0] represents bytes [*(r10-8)..*(r10-1)] + * stack[1] represents bytes [*(r10-16)..*(r10-9)] + * ... + * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)] + */ struct bpf_stack_state *stack; + /* Size of the current stack, in bytes. The stack state is tracked above, in + * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
+ */ + int allocated_stack; +}; + +#define MAX_CALL_FRAMES 8 + +/* instruction history flags, used in bpf_jmp_history_entry.flags field */ +enum { + /* instruction references stack slot through PTR_TO_STACK register; + * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8) + * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512, + * 8 bytes per slot, so slot index (spi) is [0, 63]) + */ + INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */ + + INSN_F_SPI_MASK = 0x3f, /* 6 bits */ + INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */ + + INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */ }; -struct bpf_idx_pair { - u32 prev_idx; +static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES); +static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8); + +struct bpf_jmp_history_entry { u32 idx; + /* insn idx can't be bigger than 1 million */ + u32 prev_idx : 22; + /* special flags, e.g., whether insn is doing register stack spill/load */ + u32 flags : 10; }; -#define MAX_CALL_FRAMES 8 /* Maximum number of register states that can exist at once */ #define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES) struct bpf_verifier_state { @@ -372,32 +421,48 @@ struct bpf_verifier_state { struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; + /* If this state was ever pointed-to by other state's loop_entry field + * this flag would be set to true. Used to avoid freeing such states + * while they are still in use. + */ + bool used_as_loop_entry; /* first and last insn idx of this verifier state */ u32 first_insn_idx; u32 last_insn_idx; + /* If this state is a part of states loop this field points to some + * parent of this state such that: + * - it is also a member of the same states loop; + * - DFS states traversal starting from initial state visits loop_entry + * state before this state. + * Used to compute topmost loop entry for state loops. + * State loops might appear because of open coded iterators logic. + * See get_loop_entry() for more information. + */ + struct bpf_verifier_state *loop_entry; /* jmp history recorded from first to last. * backtracking is using it to go from last to first. * For most states jmp_history_cnt is [0-3]. * For loops can go up to ~40. */ - struct bpf_idx_pair *jmp_history; + struct bpf_jmp_history_entry *jmp_history; u32 jmp_history_cnt; + u32 dfs_depth; + u32 callback_unroll_depth; }; -#define bpf_get_spilled_reg(slot, frame) \ +#define bpf_get_spilled_reg(slot, frame, mask) \ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ - (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ + ((1 << frame->stack[slot].slot_type[0]) & (mask))) \ ? &frame->stack[slot].spilled_ptr : NULL) /* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. 
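bpf_get_spilled_reg() now filters on a slot-type mask instead of assuming STACK_SPILL, and the iteration macros just below thread that mask through. A hedged sketch of walking one frame (the function is invented; the macro and STACK_SPILL are real):

static void walk_spills(struct bpf_func_state *frame)
{
	struct bpf_reg_state *reg;
	int i;

	/* Visit only slots whose type matches the mask; passing a wider
	 * mask (e.g. adding 1 << STACK_DYNPTR) widens the walk. */
	bpf_for_each_spilled_reg(i, frame, reg, 1 << STACK_SPILL) {
		if (!reg)
			continue;
		/* inspect reg->type, reg->id, ... */
	}
}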
*/ -#define bpf_for_each_spilled_reg(iter, frame, reg) \ - for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ iter < frame->allocated_stack / BPF_REG_SIZE; \ - iter++, reg = bpf_get_spilled_reg(iter, frame)) +#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \ + for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \ iter < frame->allocated_stack / BPF_REG_SIZE; \ - iter++, reg = bpf_get_spilled_reg(iter, frame)) + iter++, reg = bpf_get_spilled_reg(iter, frame, mask)) -/* Invoke __expr over registers in __vst, setting __state and __reg */ -#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \ +#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \ ({ \ struct bpf_verifier_state *___vstate = __vst; \ int ___i, ___j; \ @@ -409,7 +474,7 @@ struct bpf_verifier_state { __reg = &___regs[___j]; \ (void)(__expr); \ } \ - bpf_for_each_spilled_reg(___j, __state, __reg) { \ + bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \ if (!__reg) \ continue; \ (void)(__expr); \ @@ -417,6 +482,10 @@ struct bpf_verifier_state { } \ }) +/* Invoke __expr over registers in __vst, setting __state and __reg */ +#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \ + bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr) + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; @@ -480,6 +549,7 @@ struct bpf_insn_aux_data { bool zext_dst; /* this insn zero extends dst reg */ bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */ bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */ + bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */ u8 alu_state; /* used in combination with alu_limit */ /* below fields are initialized once */ @@ -490,6 +560,10 @@ struct bpf_insn_aux_data { * this instruction, regardless of any heuristics */ bool force_checkpoint; + /* true if instruction is a call to a helper function that + * accepts callback function as a parameter. + */ + bool calls_callback; }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ @@ -532,15 +606,28 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) #define BPF_MAX_SUBPROGS 256 +struct bpf_subprog_arg_info { + enum bpf_arg_type arg_type; + union { + u32 mem_size; + }; +}; + struct bpf_subprog_info { /* 'start' has to be the first field otherwise find_subprog() won't work */ u32 start; /* insn idx of function entry point */ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ u16 stack_depth; /* max.
stack depth used by this function */ - bool has_tail_call; - bool tail_call_reachable; - bool has_ld_abs; - bool is_async_cb; + bool has_tail_call: 1; + bool tail_call_reachable: 1; + bool has_ld_abs: 1; + bool is_cb: 1; + bool is_async_cb: 1; + bool is_exception_cb: 1; + bool args_cached: 1; + + u8 arg_cnt; + struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS]; }; struct bpf_verifier_env; @@ -579,6 +666,7 @@ struct bpf_verifier_env { int stack_size; /* number of states to be processed */ bool strict_alignment; /* perform strict pointer alignment checks */ bool test_state_freq; /* test verifier with different pruning frequency */ + bool test_reg_invariants; /* fail verification on register invariants violations */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ struct bpf_verifier_state_list *free_list; @@ -587,17 +675,24 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 used_btf_cnt; /* number of used BTF objects */ u32 id_gen; /* used to generate unique reg IDs */ + u32 hidden_subprog_cnt; /* number of hidden subprogs */ + int exception_callback_subprog; bool explore_alu_limits; bool allow_ptr_leaks; + /* Allow access to uninitialized stack memory. Writes with fixed offset are + * always allowed, so this refers to reads (with fixed or variable offset), + * to writes with variable offset and to indirect (helper) accesses. + */ bool allow_uninit_stack; bool bpf_capable; bool bypass_spec_v1; bool bypass_spec_v4; bool seen_direct_write; + bool seen_exception; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; - struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; + struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */ union { struct bpf_idmap idmap_scratch; struct bpf_idset idset_scratch; @@ -608,6 +703,7 @@ struct bpf_verifier_env { int cur_stack; } cfg; struct backtrack_state bt; + struct bpf_jmp_history_entry *cur_hist_ent; u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ @@ -642,6 +738,16 @@ struct bpf_verifier_env { char tmp_str_buf[TMP_STR_BUF_LEN]; }; +static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog) +{ + return &env->prog->aux->func_info_aux[subprog]; +} + +static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog) +{ + return &env->subprog_info[subprog]; +} + __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, va_list args); __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, @@ -653,6 +759,10 @@ int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level, void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos); int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual); +__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env, + u32 insn_off, + const char *prefix_fmt, ...); + static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { struct bpf_verifier_state *cur = env->cur_state; @@ -675,14 +785,6 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, void bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); -int check_ptr_off_reg(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, 
int regno); -int check_func_arg_reg_off(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, int regno, - enum bpf_arg_type arg_type); -int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, - u32 regno, u32 mem_size); - /* this lives here instead of in bpf.h because it needs to dereference tgt_prog */ static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog, struct btf *btf, u32 btf_id) @@ -752,4 +854,76 @@ static inline bool bpf_type_has_unsafe_modifiers(u32 type) return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS; } +static inline bool type_is_ptr_alloc_obj(u32 type) +{ + return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC; +} + +static inline bool type_is_non_owning_ref(u32 type) +{ + return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF; +} + +static inline bool type_is_pkt_pointer(enum bpf_reg_type type) +{ + type = base_type(type); + return type == PTR_TO_PACKET || + type == PTR_TO_PACKET_META; +} + +static inline bool type_is_sk_pointer(enum bpf_reg_type type) +{ + return type == PTR_TO_SOCKET || + type == PTR_TO_SOCK_COMMON || + type == PTR_TO_TCP_SOCK || + type == PTR_TO_XDP_SOCK; +} + +static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno) +{ + env->scratched_regs |= 1U << regno; +} + +static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi) +{ + env->scratched_stack_slots |= 1ULL << spi; +} + +static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno) +{ + return (env->scratched_regs >> regno) & 1; +} + +static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno) +{ + return (env->scratched_stack_slots >> regno) & 1; +} + +static inline bool verifier_state_scratched(const struct bpf_verifier_env *env) +{ + return env->scratched_regs || env->scratched_stack_slots; +} + +static inline void mark_verifier_state_clean(struct bpf_verifier_env *env) +{ + env->scratched_regs = 0U; + env->scratched_stack_slots = 0ULL; +} + +/* Used for printing the entire verifier state. 
*/ +static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env) +{ + env->scratched_regs = ~0U; + env->scratched_stack_slots = ~0ULL; +} + +const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type); +const char *dynptr_type_str(enum bpf_dynptr_type type); +const char *iter_type_str(const struct btf *btf, u32 btf_id); +const char *iter_state_str(enum bpf_iter_state state); + +void print_verifier_state(struct bpf_verifier_env *env, + const struct bpf_func_state *state, bool print_all); +void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state); + #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h deleted file mode 100644 index 736ded4905e0..000000000000 --- a/include/linux/bpfilter.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_BPFILTER_H -#define _LINUX_BPFILTER_H - -#include <uapi/linux/bpfilter.h> -#include <linux/usermode_driver.h> -#include <linux/sockptr.h> - -struct sock; -int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, - unsigned int optlen); -int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, - int __user *optlen); - -struct bpfilter_umh_ops { - struct umd_info info; - /* since ip_getsockopt() can run in parallel, serialize access to umh */ - struct mutex lock; - int (*sockopt)(struct sock *sk, int optname, sockptr_t optval, - unsigned int optlen, bool is_set); - int (*start)(void); -}; -extern struct bpfilter_umh_ops bpfilter_ops; -#endif diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index c55810a43541..1394ba302367 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -11,6 +11,7 @@ #define PHY_ID_BCM50610 0x0143bd60 #define PHY_ID_BCM50610M 0x0143bd70 +#define PHY_ID_BCM5221 0x004061e0 #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 @@ -331,6 +332,15 @@ #define BCM54XX_WOL_INT_STATUS (MII_BCM54XX_EXP_SEL_WOL + 0x94) +/* BCM5221 Registers */ +#define BCM5221_AEGSR 0x1C +#define BCM5221_AEGSR_MDIX_STATUS BIT(13) +#define BCM5221_AEGSR_MDIX_MAN_SWAP BIT(12) +#define BCM5221_AEGSR_MDIX_DIS BIT(11) + +#define BCM5221_SHDW_AM4_EN_CLK_LPM BIT(2) +#define BCM5221_SHDW_AM4_FORCE_LPM BIT(1) + /*****************************************************************************/ /* Fast Ethernet Transceiver definitions. */ /*****************************************************************************/ diff --git a/include/linux/btf.h b/include/linux/btf.h index 928113a80a95..cf5c6ff48981 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -74,6 +74,7 @@ #define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */ #define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */ #define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */ +#define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */ /* * Tag marking a kernel function as a kfunc. 
This is meant to minimize the @@ -83,6 +84,17 @@ */ #define __bpf_kfunc __used noinline +#define __bpf_kfunc_start_defs() \ + __diag_push(); \ + __diag_ignore_all("-Wmissing-declarations", \ + "Global kfuncs as their definitions will be in BTF");\ + __diag_ignore_all("-Wmissing-prototypes", \ + "Global kfuncs as their definitions will be in BTF") + +#define __bpf_kfunc_end_defs() __diag_pop() +#define __bpf_hook_start() __bpf_kfunc_start_defs() +#define __bpf_hook_end() __bpf_kfunc_end_defs() + /* * Return the name of the passed struct, if exists, or halt the build if for * example the structure gets renamed. In this way, developers have to revisit @@ -500,7 +512,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id); int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, struct module *owner); struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id); -const struct btf_member * +const struct btf_type * btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, enum bpf_prog_type prog_type, int arg); diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index a3462a9b8e18..a9cb10b0e2e9 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -49,7 +49,7 @@ word \ ____BTF_ID(symbol, word) #define __ID(prefix) \ - __PASTE(prefix, __COUNTER__) + __PASTE(__PASTE(prefix, __COUNTER__), __LINE__) /* * The BTF_ID defines unique symbol for each ID pointing diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 4ede47649a81..d78454a4dd1f 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -171,7 +171,10 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh) return test_bit_acquire(BH_Uptodate, &bh->b_state); } -#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) +static inline unsigned long bh_offset(const struct buffer_head *bh) +{ + return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1); +} /* If we *know* page->private refers to buffer_heads */ #define page_buffers(page) \ @@ -195,16 +198,13 @@ void touch_buffer(struct buffer_head *bh); void folio_set_bh(struct buffer_head *bh, struct folio *folio, unsigned long offset); struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size, - bool retry); + gfp_t gfp); struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, bool retry); -void create_empty_buffers(struct page *, unsigned long, - unsigned long b_state); -void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize, - unsigned long b_state); +struct buffer_head *create_empty_buffers(struct folio *folio, + unsigned long blocksize, unsigned long b_state); void end_buffer_read_sync(struct buffer_head *bh, int uptodate); void end_buffer_write_sync(struct buffer_head *bh, int uptodate); -void end_buffer_async_write(struct buffer_head *bh, int uptodate); /* Things to do with buffers at mapping->private_list */ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); @@ -224,8 +224,8 @@ void __wait_on_buffer(struct buffer_head *); wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned size); -struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, - unsigned size, gfp_t gfp); +struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block, + unsigned size, gfp_t gfp); void __brelse(struct 
buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); @@ -251,11 +251,10 @@ void __bh_read_batch(int nr, struct buffer_head *bhs[], * address_spaces. */ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length); -int block_write_full_page(struct page *page, get_block_t *get_block, - struct writeback_control *wbc); +int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, + void *get_block); int __block_write_full_folio(struct inode *inode, struct folio *folio, - get_block_t *get_block, struct writeback_control *wbc, - bh_end_io_t *handler); + get_block_t *get_block, struct writeback_control *wbc); int block_read_full_folio(struct folio *, get_block_t *); bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, @@ -269,7 +268,6 @@ int generic_write_end(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page *, void *); void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to); -void clean_page_buffers(struct page *page); int cont_write_begin(struct file *, struct address_space *, loff_t, unsigned, struct page **, void **, get_block_t *, loff_t *); @@ -335,17 +333,38 @@ sb_breadahead(struct super_block *sb, sector_t block) __breadahead(sb->s_bdev, block, sb->s_blocksize); } -static inline struct buffer_head * -sb_getblk(struct super_block *sb, sector_t block) +static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, + sector_t block, unsigned size) { - return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); + gfp_t gfp; + + gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp |= __GFP_NOFAIL; + + return bdev_getblk(bdev, block, size, gfp); } +static inline struct buffer_head *__getblk(struct block_device *bdev, + sector_t block, unsigned size) +{ + gfp_t gfp; -static inline struct buffer_head * -sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp) + gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp |= __GFP_MOVABLE | __GFP_NOFAIL; + + return bdev_getblk(bdev, block, size, gfp); +} + +static inline struct buffer_head *sb_getblk(struct super_block *sb, + sector_t block) +{ + return __getblk(sb->s_bdev, block, sb->s_blocksize); +} + +static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb, + sector_t block, gfp_t gfp) { - return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp); + return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp); } static inline struct buffer_head * @@ -382,20 +401,6 @@ static inline void lock_buffer(struct buffer_head *bh) __lock_buffer(bh); } -static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, - sector_t block, - unsigned size) -{ - return __getblk_gfp(bdev, block, size, 0); -} - -static inline struct buffer_head *__getblk(struct block_device *bdev, - sector_t block, - unsigned size) -{ - return __getblk_gfp(bdev, block, size, __GFP_MOVABLE); -} - static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags) { if (!buffer_uptodate(bh) && trylock_buffer(bh)) { @@ -447,6 +452,28 @@ __bread(struct block_device *bdev, sector_t block, unsigned size) return __bread_gfp(bdev, block, size, __GFP_MOVABLE); } +/** + * get_nth_bh - Get a reference on the n'th buffer after this one. + * @bh: The buffer to start counting from. + * @count: How many buffers to skip. 
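+ *
+ * A minimal usage sketch (illustrative only, not part of this patch),
+ * reaching the buffer that covers byte pos of a folio with 512-byte
+ * blocks:
+ *
+ *	struct buffer_head *bh = get_nth_bh(folio_buffers(folio), pos / 512);
+ *	...
+ *	put_bh(bh);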
+ * + * This is primarily useful for finding the nth buffer in a folio; in + * that case you pass the head buffer and the byte offset in the folio + * divided by the block size. It can be used for other purposes, but + * it will wrap at the end of the folio rather than returning NULL or + * proceeding to the next folio for you. + * + * Return: The requested buffer with an elevated refcount. + */ +static inline __must_check +struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count) +{ + while (count--) + bh = bh->b_this_page; + get_bh(bh); + return bh; +} + bool block_dirty_folio(struct address_space *mapping, struct folio *folio); #ifdef CONFIG_BUFFER_HEAD diff --git a/include/linux/buildid.h b/include/linux/buildid.h index 3b7a0ff4642f..8a582d242f06 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -2,10 +2,11 @@ #ifndef _LINUX_BUILDID_H #define _LINUX_BUILDID_H -#include <linux/mm_types.h> +#include <linux/types.h> #define BUILD_ID_SIZE_MAX 20 +struct vm_area_struct; int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); diff --git a/include/linux/cache.h b/include/linux/cache.h index 9900d20b76c2..0ecb17bb6883 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -85,6 +85,31 @@ #define cache_line_size() L1_CACHE_BYTES #endif +#ifndef __cacheline_group_begin +#define __cacheline_group_begin(GROUP) \ + __u8 __cacheline_group_begin__##GROUP[0] +#endif + +#ifndef __cacheline_group_end +#define __cacheline_group_end(GROUP) \ + __u8 __cacheline_group_end__##GROUP[0] +#endif + +#ifndef CACHELINE_ASSERT_GROUP_MEMBER +#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \ + BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \ + offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \ + offsetofend(TYPE, MEMBER) <= \ + offsetof(TYPE, __cacheline_group_end__##GROUP))) +#endif + +#ifndef CACHELINE_ASSERT_GROUP_SIZE +#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \ + BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \ + offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \ + SIZE) +#endif + /* * Helper to add padding within a struct to ensure data fall into separate * cachelines. 
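A minimal sketch of how the new cacheline-group macros fit together (hypothetical struct and field names, not from this patch): the zero-length marker arrays bound a named group of hot fields and add no storage, while the asserts fail the build if a field drifts out of the group or the group outgrows the given size.

	struct hypothetical_stats {
		__cacheline_group_begin(hot);
		u64 rx_packets;	/* hot fields, intended to share a cacheline */
		u64 rx_bytes;
		__cacheline_group_end(hot);
		u64 rarely_touched;
	};

	/* Build-time layout checks; these compile to nothing at runtime. */
	static void hypothetical_layout_checks(void)
	{
		CACHELINE_ASSERT_GROUP_MEMBER(struct hypothetical_stats, hot, rx_bytes);
		CACHELINE_ASSERT_GROUP_SIZE(struct hypothetical_stats, hot, SMP_CACHE_BYTES);
	}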
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index a5cfd44fab45..d504eb4b49ab 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -73,6 +73,7 @@ struct cacheinfo { struct cpu_cacheinfo { struct cacheinfo *info_list; + unsigned int per_cpu_data_slice_size; unsigned int num_levels; unsigned int num_leaves; bool cpu_map_populated; diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 982ba245eb41..1b92aed49363 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -195,6 +195,10 @@ int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); const char *can_get_state_str(const enum can_state state); +void can_state_get_by_berr_counter(const struct net_device *dev, + const struct can_berr_counter *bec, + enum can_state *tx_state, + enum can_state *rx_state); void can_change_state(struct net_device *dev, struct can_frame *cf, enum can_state tx_state, enum can_state rx_state); diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h index bead71b7bc73..6355a36a3f81 100644 --- a/include/linux/cdx/cdx_bus.h +++ b/include/linux/cdx/cdx_bus.h @@ -21,13 +21,19 @@ struct cdx_controller; enum { + CDX_DEV_BUS_MASTER_CONF, CDX_DEV_RESET_CONF, }; struct cdx_device_config { u8 type; + bool bus_master_enable; }; +typedef int (*cdx_bus_enable_cb)(struct cdx_controller *cdx, u8 bus_num); + +typedef int (*cdx_bus_disable_cb)(struct cdx_controller *cdx, u8 bus_num); + typedef int (*cdx_scan_cb)(struct cdx_controller *cdx); typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx, @@ -35,6 +41,19 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx, struct cdx_device_config *dev_config); /** + * CDX_DEVICE - macro used to describe a specific CDX device + * @vend: the 16 bit CDX Vendor ID + * @dev: the 16 bit CDX Device ID + * + * This macro is used to create a struct cdx_device_id that matches a + * specific device. The subvendor and subdevice fields will be set to + * CDX_ANY_ID. + */ +#define CDX_DEVICE(vend, dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID + +/** * CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with * override_only flags. * @vend: the 16 bit CDX Vendor ID @@ -42,18 +61,24 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx, * @driver_override: the 32 bit CDX Device override_only * * This macro is used to create a struct cdx_device_id that matches only a - * driver_override device. + * driver_override device. The subvendor and subdevice fields will be set to + * CDX_ANY_ID. */ #define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \ - .vendor = (vend), .device = (dev), .override_only = (driver_override) + .vendor = (vend), .device = (dev), .subvendor = CDX_ANY_ID,\ + .subdevice = CDX_ANY_ID, .override_only = (driver_override) /** * struct cdx_ops - Callbacks supported by CDX controller. + * @bus_enable: enable bus on the controller + * @bus_disable: disable bus on the controller * @scan: scan the devices on the controller * @dev_configure: configuration like reset, master_enable, * msi_config etc for a CDX device */ struct cdx_ops { + cdx_bus_enable_cb bus_enable; + cdx_bus_disable_cb bus_disable; cdx_scan_cb scan; cdx_dev_configure_cb dev_configure; }; @@ -63,12 +88,14 @@ struct cdx_ops { * @dev: Linux device associated with the CDX controller. 
* @priv: private data * @id: Controller ID + * @controller_registered: controller registered with bus * @ops: CDX controller ops */ struct cdx_controller { struct device *dev; void *priv; u32 id; + bool controller_registered; struct cdx_ops *ops; }; @@ -78,14 +105,21 @@ struct cdx_controller { * @cdx: CDX controller associated with the device * @vendor: Vendor ID for CDX device * @device: Device ID for CDX device + * @subsystem_vendor: Subsystem Vendor ID for CDX device + * @subsystem_device: Subsystem Device ID for CDX device + * @class: Class for the CDX device + * @revision: Revision of the CDX device * @bus_num: Bus number for this CDX device * @dev_num: Device number for this device * @res: array of MMIO region entries * @res_attr: resource binary attribute + * @debugfs_dir: debugfs directory for this device * @res_count: number of valid MMIO regions * @dma_mask: Default DMA mask * @flags: CDX device flags * @req_id: Requestor ID associated with CDX device + * @is_bus: Is this bus device + * @enabled: is this bus enabled * @driver_override: driver name to force a match; do not set directly, * because core frees it; use driver_set_override() to * set or clear it. @@ -95,19 +129,36 @@ struct cdx_device { struct cdx_controller *cdx; u16 vendor; u16 device; + u16 subsystem_vendor; + u16 subsystem_device; + u32 class; + u8 revision; u8 bus_num; u8 dev_num; struct resource res[MAX_CDX_DEV_RESOURCES]; + struct bin_attribute *res_attr[MAX_CDX_DEV_RESOURCES]; + struct dentry *debugfs_dir; u8 res_count; u64 dma_mask; u16 flags; u32 req_id; + bool is_bus; + bool enabled; const char *driver_override; }; #define to_cdx_device(_dev) \ container_of(_dev, struct cdx_device, dev) +#define cdx_resource_start(dev, num) ((dev)->res[(num)].start) +#define cdx_resource_end(dev, num) ((dev)->res[(num)].end) +#define cdx_resource_flags(dev, num) ((dev)->res[(num)].flags) +#define cdx_resource_len(dev, num) \ + ((cdx_resource_start((dev), (num)) == 0 && \ + cdx_resource_end((dev), (num)) == \ + cdx_resource_start((dev), (num))) ? 0 : \ + (cdx_resource_end((dev), (num)) - \ + cdx_resource_start((dev), (num)) + 1)) /** * struct cdx_driver - CDX device driver * @driver: Generic device driver @@ -170,4 +221,20 @@ extern struct bus_type cdx_bus_type; */ int cdx_dev_reset(struct device *dev); +/** + * cdx_set_master - enables bus-mastering for CDX device + * @cdx_dev: the CDX device to enable + * + * Return: 0 for success, -errno on failure + */ +int cdx_set_master(struct cdx_device *cdx_dev); + +/** + * cdx_clear_master - disables bus-mastering for CDX device + * @cdx_dev: the CDX device to disable + * + * Return: 0 for success, -errno on failure + */ +int cdx_clear_master(struct cdx_device *cdx_dev); + #endif /* _CDX_BUS_H_ */ diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h index d5a5da838caf..11a92a946016 100644 --- a/include/linux/ceph/ceph_debug.h +++ b/include/linux/ceph/ceph_debug.h @@ -19,12 +19,25 @@ pr_debug("%.*s %12.12s:%-4d : " fmt, \ 8 - (int)sizeof(KBUILD_MODNAME), " ", \ kbasename(__FILE__), __LINE__, ##__VA_ARGS__) +# define doutc(client, fmt, ...) \ + pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt, \ + 8 - (int)sizeof(KBUILD_MODNAME), " ", \ + kbasename(__FILE__), __LINE__, \ + &client->fsid, client->monc.auth->global_id, \ + ##__VA_ARGS__) # else /* faux printk call just to see any compiler warnings. */ # define dout(fmt, ...) do { \ if (0) \ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ } while (0) +# define doutc(client, fmt, ...) 
do { \ + if (0) \ + printk(KERN_DEBUG "[%pU %llu] " fmt, \ + &client->fsid, \ + client->monc.auth->global_id, \ + ##__VA_ARGS__); \ + } while (0) # endif #else @@ -33,7 +46,32 @@ * or, just wrap pr_debug */ # define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__) +# define doutc(client, fmt, ...) \ + pr_debug(" [%pU %llu] %s: " fmt, &client->fsid, \ + client->monc.auth->global_id, __func__, ##__VA_ARGS__) #endif +#define pr_notice_client(client, fmt, ...) \ + pr_notice("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_info_client(client, fmt, ...) \ + pr_info("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_warn_client(client, fmt, ...) \ + pr_warn("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_warn_once_client(client, fmt, ...) \ + pr_warn_once("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_err_client(client, fmt, ...) \ + pr_err("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_warn_ratelimited_client(client, fmt, ...) \ + pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_err_ratelimited_client(client, fmt, ...) \ + pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) + #endif diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 49586ff26152..ee1d0e5f9789 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -357,16 +357,26 @@ enum { CEPH_MDS_OP_RENAMESNAP = 0x01403, }; -extern const char *ceph_mds_op_name(int op); +#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \ + op == CEPH_MDS_OP_MKNOD || \ + op == CEPH_MDS_OP_MKDIR || \ + op == CEPH_MDS_OP_SYMLINK) +extern const char *ceph_mds_op_name(int op); -#define CEPH_SETATTR_MODE 1 -#define CEPH_SETATTR_UID 2 -#define CEPH_SETATTR_GID 4 -#define CEPH_SETATTR_MTIME 8 -#define CEPH_SETATTR_ATIME 16 -#define CEPH_SETATTR_SIZE 32 -#define CEPH_SETATTR_CTIME 64 +#define CEPH_SETATTR_MODE (1 << 0) +#define CEPH_SETATTR_UID (1 << 1) +#define CEPH_SETATTR_GID (1 << 2) +#define CEPH_SETATTR_MTIME (1 << 3) +#define CEPH_SETATTR_ATIME (1 << 4) +#define CEPH_SETATTR_SIZE (1 << 5) +#define CEPH_SETATTR_CTIME (1 << 6) +#define CEPH_SETATTR_MTIME_NOW (1 << 7) +#define CEPH_SETATTR_ATIME_NOW (1 << 8) +#define CEPH_SETATTR_BTIME (1 << 9) +#define CEPH_SETATTR_KILL_SGUID (1 << 10) +#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11) +#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12) /* * Ceph setxattr request flags. 
@@ -479,7 +489,7 @@ union ceph_mds_request_args_ext { #define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ #define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */ -struct ceph_mds_request_head_old { +struct ceph_mds_request_head_legacy { __le64 oldest_client_tid; __le32 mdsmap_epoch; /* on client */ __le32 flags; /* CEPH_MDS_FLAG_* */ @@ -492,9 +502,9 @@ struct ceph_mds_request_head_old { union ceph_mds_request_args args; } __attribute__ ((packed)); -#define CEPH_MDS_REQUEST_HEAD_VERSION 1 +#define CEPH_MDS_REQUEST_HEAD_VERSION 3 -struct ceph_mds_request_head { +struct ceph_mds_request_head_old { __le16 version; /* struct version */ __le64 oldest_client_tid; __le32 mdsmap_epoch; /* on client */ @@ -508,6 +518,26 @@ struct ceph_mds_request_head { union ceph_mds_request_args_ext args; } __attribute__ ((packed)); +struct ceph_mds_request_head { + __le16 version; /* struct version */ + __le64 oldest_client_tid; + __le32 mdsmap_epoch; /* on client */ + __le32 flags; /* CEPH_MDS_FLAG_* */ + __u8 num_retry, num_fwd; /* legacy count retry and fwd attempts */ + __le16 num_releases; /* # include cap/lease release records */ + __le32 op; /* mds op code */ + __le32 caller_uid, caller_gid; + __le64 ino; /* use this ino for openc, mkdir, mknod, + etc. (if replaying) */ + union ceph_mds_request_args_ext args; + + __le32 ext_num_retry; /* new count retry attempts */ + __le32 ext_num_fwd; /* new count fwd attempts */ + + __le32 struct_len; /* to store size of struct ceph_mds_request_head */ + __le32 owner_uid, owner_gid; /* used for OPs which create inodes */ +} __attribute__ ((packed)); + /* cap/lease release record */ struct ceph_mds_request_release { __le64 ino, cap_id; /* ino and unique cap id */ diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h deleted file mode 100644 index 4c3e0648dc27..000000000000 --- a/include/linux/ceph/mdsmap.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _FS_CEPH_MDSMAP_H -#define _FS_CEPH_MDSMAP_H - -#include <linux/bug.h> -#include <linux/ceph/types.h> - -/* - * mds map - describe servers in the mds cluster. 
- * - * we limit fields to those the client actually cares about */ -struct ceph_mds_info { - u64 global_id; - struct ceph_entity_addr addr; - s32 state; - int num_export_targets; - bool laggy; - u32 *export_targets; -}; - -struct ceph_mdsmap { - u32 m_epoch, m_client_epoch, m_last_failure; - u32 m_root; - u32 m_session_timeout; /* seconds */ - u32 m_session_autoclose; /* seconds */ - u64 m_max_file_size; - u64 m_max_xattr_size; /* maximum size for xattrs blob */ - u32 m_max_mds; /* expected up:active mds number */ - u32 m_num_active_mds; /* actual up:active mds number */ - u32 possible_max_rank; /* possible max rank index */ - struct ceph_mds_info *m_info; - - /* which object pools file data can be stored in */ - int m_num_data_pg_pools; - u64 *m_data_pg_pools; - u64 m_cas_pg_pool; - - bool m_enabled; - bool m_damaged; - int m_num_laggy; -}; - -static inline struct ceph_entity_addr * -ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) -{ - if (w >= m->possible_max_rank) - return NULL; - return &m->m_info[w].addr; -} - -static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) -{ - BUG_ON(w < 0); - if (w >= m->possible_max_rank) - return CEPH_MDS_STATE_DNE; - return m->m_info[w].state; -} - -static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) -{ - if (w >= 0 && w < m->possible_max_rank) - return m->m_info[w].laggy; - return false; -} - -extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); -struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2); -extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); -extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m); - -#endif diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 99c1726be6ee..1717cc57cdac 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -17,6 +17,7 @@ struct ceph_msg; struct ceph_connection; +struct ceph_msg_data_cursor; /* * Ceph defines these callbacks for handling connection events. @@ -70,6 +71,30 @@ struct ceph_connection_operations { int used_proto, int result, const int *allowed_protos, int proto_cnt, const int *allowed_modes, int mode_cnt); + + /** + * sparse_read: read sparse data + * @con: connection we're reading from + * @cursor: data cursor for reading extents + * @buf: optional buffer to read into + * + * This should be called more than once, each time setting up to + * receive an extent into the current cursor position, and zeroing + * the holes between them. + * + * Returns amount of data to be read (in bytes), 0 if reading is + * complete, or -errno if there was an error. + * + * If @buf is set on a >0 return, then the data should be read into + * the provided buffer. Otherwise, it should be read into the cursor. + * + * The sparse read operation is expected to initialize the cursor + * with a length covering up to the end of the last extent.
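+ *
+ * A rough sketch of the expected receive loop (illustrative only, not
+ * part of this patch):
+ *
+ *	while ((ret = con->ops->sparse_read(con, cursor, &buf)) > 0) {
+ *		if (buf)
+ *			read ret bytes into buf;
+ *		else
+ *			read ret bytes into the cursor position;
+ *	}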
+ */ + int (*sparse_read)(struct ceph_connection *con, + struct ceph_msg_data_cursor *cursor, + char **buf); + }; /* use format string %s%lld */ @@ -98,6 +123,7 @@ enum ceph_msg_data_type { CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */ #endif /* CONFIG_BLOCK */ CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */ + CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */ }; #ifdef CONFIG_BLOCK @@ -199,6 +225,7 @@ struct ceph_msg_data { bool own_pages; }; struct ceph_pagelist *pagelist; + struct iov_iter iter; }; }; @@ -207,6 +234,7 @@ struct ceph_msg_data_cursor { struct ceph_msg_data *data; /* current data item */ size_t resid; /* bytes not yet consumed */ + int sr_resid; /* residual sparse_read len */ bool need_crc; /* crc update needed */ union { #ifdef CONFIG_BLOCK @@ -222,6 +250,10 @@ struct ceph_msg_data_cursor { struct page *page; /* page from list */ size_t offset; /* bytes from list */ }; + struct { + struct iov_iter iov_iter; + unsigned int lastlen; + }; }; }; @@ -251,6 +283,7 @@ struct ceph_msg { struct kref kref; bool more_to_follow; bool needs_out_seq; + u64 sparse_read_total; int front_alloc_len; struct ceph_msgpool *pool; @@ -309,6 +342,10 @@ struct ceph_connection_v1_info { int in_base_pos; /* bytes read */ + /* sparse reads */ + struct kvec in_sr_kvec; /* current location to receive into */ + u64 in_sr_len; /* amount of data in this extent */ + /* message in temps */ u8 in_tag; /* protocol control byte */ struct ceph_msg_header in_hdr; @@ -395,6 +432,7 @@ struct ceph_connection_v2_info { void *conn_bufs[16]; int conn_buf_cnt; + int data_len_remain; struct kvec in_sign_kvecs[8]; struct kvec out_sign_kvecs[8]; @@ -573,6 +611,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, #endif /* CONFIG_BLOCK */ void ceph_msg_data_add_bvecs(struct ceph_msg *msg, struct ceph_bvec_iter *bvec_pos); +void ceph_msg_data_add_iter(struct ceph_msg *msg, + struct iov_iter *iter); struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items, gfp_t flags, bool can_fail); diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index b658961156a0..7a9a40163c0f 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -19,7 +19,7 @@ struct ceph_monmap { struct ceph_fsid fsid; u32 epoch; u32 num_mon; - struct ceph_entity_inst mon_inst[]; + struct ceph_entity_inst mon_inst[] __counted_by(num_mon); }; struct ceph_mon_client; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index fb6be72104df..f66f6aac74f6 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -29,14 +29,63 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); #define CEPH_HOMELESS_OSD -1 -/* a given osd we're communicating with */ +/* + * A single extent in a SPARSE_READ reply. + * + * Note that these come from the OSD as little-endian values. On BE arches, + * we convert them in-place after receipt. + */ +struct ceph_sparse_extent { + u64 off; + u64 len; +} __packed; + +/* Sparse read state machine state values */ +enum ceph_sparse_read_state { + CEPH_SPARSE_READ_HDR = 0, + CEPH_SPARSE_READ_EXTENTS, + CEPH_SPARSE_READ_DATA_LEN, + CEPH_SPARSE_READ_DATA_PRE, + CEPH_SPARSE_READ_DATA, +}; + +/* + * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of + * 64-bit offset/length pairs, and then all of the actual file data + * concatenated after it (sans holes). 
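+ *
+ * For example (illustrative values only): an object with data at
+ * [0, 4096) and [8192, 12288) would produce a reply along the lines of
+ *
+ *	count:   2
+ *	extents: { .off = 0, .len = 4096 }, { .off = 8192, .len = 4096 }
+ *	data:    8192 bytes - both extents back to back, hole omitted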
+ * + * Unfortunately, we don't know how long the extent array is until we've + * started reading the data section of the reply. The caller should send down + * a destination buffer for the array, but we'll alloc one if it's too small + * or if the caller doesn't. + */ +struct ceph_sparse_read { + enum ceph_sparse_read_state sr_state; /* state machine state */ + u64 sr_req_off; /* orig request offset */ + u64 sr_req_len; /* orig request length */ + u64 sr_pos; /* current pos in buffer */ + int sr_index; /* current extent index */ + u32 sr_datalen; /* length of actual data */ + u32 sr_count; /* extent count in reply */ + int sr_ext_len; /* length of extent array */ + struct ceph_sparse_extent *sr_extent; /* extent array */ +}; + +/* + * A given osd we're communicating with. + * + * Note that the o_requests tree can be searched while holding the "lock" mutex + * or the "o_requests_lock" spinlock. Insertion or removal requires both! + */ struct ceph_osd { refcount_t o_ref; + int o_sparse_op_idx; struct ceph_osd_client *o_osdc; int o_osd; int o_incarnation; struct rb_node o_node; struct ceph_connection o_con; + spinlock_t o_requests_lock; struct rb_root o_requests; struct rb_root o_linger_requests; struct rb_root o_backoff_mappings; @@ -46,6 +95,7 @@ struct ceph_osd { unsigned long lru_ttl; struct list_head o_keepalive_item; struct mutex lock; + struct ceph_sparse_read o_sparse_read; }; #define CEPH_OSD_SLAB_OPS 2 @@ -59,6 +109,7 @@ enum ceph_osd_data_type { CEPH_OSD_DATA_TYPE_BIO, #endif /* CONFIG_BLOCK */ CEPH_OSD_DATA_TYPE_BVECS, + CEPH_OSD_DATA_TYPE_ITER, }; struct ceph_osd_data { @@ -82,6 +133,7 @@ struct ceph_osd_data { struct ceph_bvec_iter bvec_pos; u32 num_bvecs; }; + struct iov_iter iter; }; }; @@ -98,6 +150,8 @@ struct ceph_osd_req_op { u64 offset, length; u64 truncate_size; u32 truncate_seq; + int sparse_ext_cnt; + struct ceph_sparse_extent *sparse_ext; struct ceph_osd_data osd_data; } extent; struct { @@ -145,6 +199,9 @@ struct ceph_osd_req_op { u32 src_fadvise_flags; struct ceph_osd_data osd_data; } copy_from; + struct { + u64 ver; + } assert_ver; }; }; @@ -199,6 +256,7 @@ struct ceph_osd_request { struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; + bool r_linger; /* don't resend on failure */ struct completion r_completion; /* private to osd_client.c */ ceph_osdc_callback_t r_callback; @@ -211,9 +269,9 @@ struct ceph_osd_request { struct ceph_snap_context *r_snapc; /* for writes */ struct timespec64 r_mtime; /* ditto */ u64 r_data_offset; /* ditto */ - bool r_linger; /* don't resend on failure */ /* internal */ + u64 r_version; /* data version sent in reply */ unsigned long r_stamp; /* jiffies, send or check time */ unsigned long r_start_stamp; /* jiffies */ ktime_t r_start_latency; /* ktime_t */ @@ -221,7 +279,7 @@ struct ceph_osd_request { int r_attempts; u32 r_map_dne_bound; - struct ceph_osd_req_op r_ops[]; + struct ceph_osd_req_op r_ops[] __counted_by(r_num_ops); }; struct ceph_request_redirect { @@ -450,6 +508,8 @@ void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, unsigned int which, struct ceph_bvec_iter *bvec_pos); +void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req, + unsigned int which, struct iov_iter *iter); extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, unsigned int which, @@ -504,6 +564,23 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, u32 truncate_seq, u64 truncate_size, 
bool use_mempool); +int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt); + +/* + * How big an extent array should we preallocate for a sparse read? This is + * just a starting value. If we get more than this back from the OSD, the + * receiver will reallocate. + */ +#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16 + +static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt) +{ + if (!cnt) + cnt = CEPH_SPARSE_EXT_ARRAY_INITIAL; + + return __ceph_alloc_sparse_ext_map(op, cnt); +} + extern void ceph_osdc_get_request(struct ceph_osd_request *req); extern void ceph_osdc_put_request(struct ceph_osd_request *req); @@ -558,5 +635,19 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, struct ceph_object_locator *oloc, struct ceph_watch_item **watchers, u32 *num_watchers); -#endif +/* Find offset into the buffer of the end of the extent map */ +static inline u64 ceph_sparse_ext_map_end(struct ceph_osd_req_op *op) +{ + struct ceph_sparse_extent *ext; + + /* No extents? No data */ + if (op->extent.sparse_ext_cnt == 0) + return 0; + + ext = &op->extent.sparse_ext[op->extent.sparse_ext_cnt - 1]; + + return ext->off + ext->len - op->extent.offset; +} + +#endif diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 43a7a1573b51..73c3efbec36c 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -524,6 +524,10 @@ struct ceph_osd_op { __le64 cookie; } __attribute__ ((packed)) notify; struct { + __le64 unused; + __le64 ver; + } __attribute__ ((packed)) assert_ver; + struct { __le64 offset, length; __le64 src_offset; } __attribute__ ((packed)) clonerange; diff --git a/include/linux/cfi.h b/include/linux/cfi.h index 3552ec82b725..f0df518e11dd 100644 --- a/include/linux/cfi.h +++ b/include/linux/cfi.h @@ -9,6 +9,14 @@ #include <linux/bug.h> #include <linux/module.h> +#include <asm/cfi.h> + +#ifndef cfi_get_offset +static inline int cfi_get_offset(void) +{ + return 0; +} +#endif #ifdef CONFIG_CFI_CLANG enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr, @@ -38,4 +46,8 @@ static inline void module_cfi_finalize(const Elf_Ehdr *hdr, #endif /* CONFIG_ARCH_USES_CFI_TRAPS */ #endif /* CONFIG_MODULES */ +#ifndef CFI_NOSEAL +#define CFI_NOSEAL(x) +#endif + #endif /* _LINUX_CFI_H */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index f1b3151ac30b..ea48c861cd36 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -115,6 +115,11 @@ enum { * Enable recursive subtree protection */ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18), + + /* + * Enable hugetlb accounting for the memory controller. + */ + CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19), }; /* cftype->flags */ @@ -238,7 +243,7 @@ struct css_set { * Lists running through all tasks using this cgroup group. * mg_tasks lists tasks which belong to this cset but are in the * process of being migrated out or in. Protected by - * css_set_rwsem, but, during migration, once tasks are moved to + * css_set_lock, but, during migration, once tasks are moved to * mg_tasks, it can be read safely while holding cgroup_mutex. */ struct list_head tasks; @@ -491,6 +496,20 @@ struct cgroup { struct cgroup_rstat_cpu __percpu *rstat_cpu; struct list_head rstat_css_list; + /* + * Add padding to separate the read mostly rstat_cpu and + * rstat_css_list into a different cacheline from the following + * rstat_flush_next and *bstat fields which can have frequent updates. 
+ */ + CACHELINE_PADDING(_pad_); + + /* + * A singly-linked list of cgroup structures to be rstat flushed. + * This is a scratch field to be used exclusively by + * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock. + */ + struct cgroup *rstat_flush_next; + /* cgroup basic resource statistics */ struct cgroup_base_stat last_bstat; struct cgroup_base_stat bstat; @@ -543,6 +562,10 @@ struct cgroup_root { /* Unique id for this hierarchy. */ int hierarchy_id; + /* A list running through the active hierarchies */ + struct list_head root_list; + struct rcu_head rcu; /* Must be near the top */ + /* * The root cgroup. The containing cgroup_root will be destroyed on its * release. cgrp->ancestors[0] will be used overflowing into the @@ -556,9 +579,6 @@ struct cgroup_root { /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ atomic_t nr_cgrps; - /* A list running through the active hierarchies */ - struct list_head root_list; - /* Hierarchy-specific flags */ unsigned int flags; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b307013b9c6c..34aaf0e87def 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -40,13 +40,11 @@ struct kernel_clone_args; #define CGROUP_WEIGHT_DFL 100 #define CGROUP_WEIGHT_MAX 10000 -/* walk only threadgroup leaders */ -#define CSS_TASK_ITER_PROCS (1U << 0) -/* walk all threaded css_sets in the domain */ -#define CSS_TASK_ITER_THREADED (1U << 1) - -/* internal flags */ -#define CSS_TASK_ITER_SKIPPED (1U << 16) +enum { + CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */ + CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */ + CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */ +}; /* a css_task_iter should be treated as an opaque object */ struct css_task_iter { @@ -71,6 +69,7 @@ struct css_task_iter { extern struct file_system_type cgroup_fs_type; extern struct cgroup_root cgrp_dfl_root; extern struct css_set init_css_set; +extern spinlock_t css_set_lock; #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; #include <linux/cgroup_subsys.h> @@ -388,7 +387,6 @@ static inline void cgroup_unlock(void) * as locks used during the cgroup_subsys::attach() methods. */ #ifdef CONFIG_PROVE_RCU -extern spinlock_t css_set_lock; #define task_css_set_check(task, __c) \ rcu_dereference_check((task)->cgroups, \ rcu_read_lock_sched_held() || \ @@ -855,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {} #endif /* CONFIG_CGROUP_BPF */ +struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id); + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index 53f1a7a932b0..c2d09bc4f976 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -7,8 +7,9 @@ /* * DEFINE_FREE(name, type, free): * simple helper macro that defines the required wrapper for a __free() - * based cleanup function. @free is an expression using '_T' to access - * the variable. + * based cleanup function. @free is an expression using '_T' to access the + * variable. @free should typically include a NULL test before calling a + * function, see the example below. * * __free(name): * variable attribute to add a scoped based cleanup to the variable. @@ -17,6 +18,9 @@ * like a non-atomic xchg(var, NULL), such that the cleanup function will * be inhibited -- provided it sanely deals with a NULL value. * + * NOTE: this has __must_check semantics so that it is harder to accidentally + * leak the resource. 
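+ *
+ * For instance (illustrative), with this definition a bare
+ *
+ *	no_free_ptr(p);
+ *
+ * that discards the returned pointer now triggers an unused-result
+ * warning, since that pointer is the only remaining reference to the
+ * allocation.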
+ * + * return_ptr(p): * returns p while inhibiting the __free(). * * Ex. * * DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) * + * void *alloc_obj(...) + * { * struct obj *p __free(kfree) = kmalloc(...); * if (!p) * return NULL; * * if (!use(p)) * return NULL; * * return_ptr(p); + * } + * + * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though + * kfree() is fine to be called with a NULL value. This is on purpose. This way + * the compiler sees the end of our alloc_obj() function as: + * + * tmp = p; + * p = NULL; + * if (p) + * kfree(p); + * return tmp; + * + * And through the magic of value-propagation and dead-code-elimination, it + * eliminates the actual cleanup call and compiles into: + * + * return p; + * + * Without the NULL test it turns into a mess and the compiler can't help us. */ #define DEFINE_FREE(_name, _type, _free) \ @@ -39,8 +63,17 @@ #define __free(_name) __cleanup(__free_##_name) +#define __get_and_null_ptr(p) \ + ({ __auto_type __ptr = &(p); \ + __auto_type __val = *__ptr; \ + *__ptr = NULL; __val; }) + +static inline __must_check +const volatile void * __must_check_fn(const volatile void *val) +{ return val; } + #define no_free_ptr(p) \ - ({ __auto_type __ptr = (p); (p) = NULL; __ptr; }) + ((typeof(p)) __must_check_fn(__get_and_null_ptr(p))) #define return_ptr(p) return no_free_ptr(p) @@ -92,25 +125,55 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ * trivial wrapper around DEFINE_CLASS() above specifically * for locks. * + * DEFINE_GUARD_COND(name, ext, condlock) + * wrapper around EXTEND_CLASS above to add conditional lock + * variants to a base class, e.g. mutex_trylock() or + * mutex_lock_interruptible(). + * * guard(name): - * an anonymous instance of the (guard) class + * an anonymous instance of the (guard) class, not recommended for + * conditional locks. * * scoped_guard (name, args...) { }: * similar to CLASS(name, scope)(args), except the variable (with the * explicit name 'scope') is declared in a for-loop such that its scope is * bound to the next (compound) statement. * + * for conditional locks the loop body is skipped when the lock is not + * acquired. + * + * scoped_cond_guard (name, fail, args...) { }: + * similar to scoped_guard(), except it executes the @fail statement + * when the lock acquire fails. + * */ #define DEFINE_GUARD(_name, _type, _lock, _unlock) \ - DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T) + DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \ + static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \ + { return *_T; } + +#define DEFINE_GUARD_COND(_name, _ext, _condlock) \ + EXTEND_CLASS(_name, _ext, \ + ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \ + class_##_name##_t _T) \ + static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \ + { return class_##_name##_lock_ptr(_T); } #define guard(_name) \ CLASS(_name, __UNIQUE_ID(guard)) +#define __guard_ptr(_name) class_##_name##_lock_ptr + #define scoped_guard(_name, args...) \ for (CLASS(_name, scope)(args), \ - *done = NULL; !done; done = (void *)1) + *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1) + +#define scoped_cond_guard(_name, _fail, args...)
\ + for (CLASS(_name, scope)(args), \ + *done = NULL; !done; done = (void *)1) \ + if (!__guard_ptr(_name)(&scope)) _fail; \ + else /* * Additional helper macros for generating lock guards with types, either for @@ -119,6 +182,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ * * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...) * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...) + * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock) * * will result in the following type: * @@ -140,6 +204,11 @@ typedef struct { \ static inline void class_##_name##_destructor(class_##_name##_t *_T) \ { \ if (_T->lock) { _unlock; } \ +} \ + \ +static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \ +{ \ + return _T->lock; \ } @@ -168,4 +237,14 @@ __DEFINE_LOCK_GUARD_1(_name, _type, _lock) __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \ __DEFINE_LOCK_GUARD_0(_name, _lock) +#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \ + EXTEND_CLASS(_name, _ext, \ + ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\ + if (_T->lock && !(_condlock)) _T->lock = NULL; \ + _t; }), \ + typeof_member(class_##_name##_t, lock) l) \ + static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \ + { return class_##_name##_lock_ptr(_T); } + + #endif /* __LINUX_GUARDS_H */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index ec32ec58c59f..1293c38ddb7f 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -74,7 +74,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core, unsigned long parent_rate); /** - * struct clk_duty - Struture encoding the duty cycle ratio of a clock + * struct clk_duty - Structure encoding the duty cycle ratio of a clock * * @num: Numerator of the duty cycle ratio * @den: Denominator of the duty cycle ratio @@ -129,7 +129,7 @@ struct clk_duty { * @restore_context: Restore the context of the clock after a restoration * of power. * - * @recalc_rate Recalculate the rate of this clock, by querying hardware. The + * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The * parent rate is an input parameter. It is up to the caller to * ensure that the prepare_mutex is held across this call. 
If the * driver cannot figure out a rate for this clock, it must return @@ -448,15 +448,15 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name, */ #define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \ parent_hw, flags, fixed_rate, fixed_accuracy) \ - __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \ - NULL, NULL, (flags), (fixed_rate), \ + __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \ + NULL, (flags), (fixed_rate), \ (fixed_accuracy), 0, false) /** * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate * clock with the clock framework * @dev: device that is registering this clock * @name: name of this clock - * @parent_name: name of clock's parent + * @parent_data: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate * @fixed_accuracy: non-adjustable clock accuracy @@ -471,7 +471,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name, * the clock framework * @dev: device that is registering this clock * @name: name of this clock - * @parent_name: name of clock's parent + * @parent_data: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate */ @@ -649,7 +649,7 @@ struct clk_div_table { * Clock with an adjustable divider affecting its output frequency. Implements * .recalc_rate, .set_rate and .round_rate * - * Flags: + * @flags: * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is * the raw value read from the register, with the value of zero considered @@ -1130,11 +1130,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev, * @mwidth: width of the numerator bit field * @nshift: shift to the denominator bit field * @nwidth: width of the denominator bit field + * @approximation: clk driver's callback for calculating the divider clock * @lock: register lock * * Clock with adjustable fractional divider affecting its output frequency. * - * Flags: + * @flags: * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED * is set then the numerator and denominator are both the value read @@ -1191,7 +1192,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw); * Clock with an adjustable multiplier affecting its output frequency. * Implements .recalc_rate, .set_rate and .round_rate * - * Flags: + * @flags: * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read * from the register, with 0 being a valid value effectively * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is diff --git a/include/linux/closure.h b/include/linux/closure.h new file mode 100644 index 000000000000..c554c6a08768 --- /dev/null +++ b/include/linux/closure.h @@ -0,0 +1,415 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CLOSURE_H +#define _LINUX_CLOSURE_H + +#include <linux/llist.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/workqueue.h> + +/* + * Closure is perhaps the most overused and abused term in computer science, but + * since I've been unable to come up with anything better you're stuck with it + * again. + * + * What are closures? + * + * They embed a refcount. 
The basic idea is they count "things that are in + * progress" - in flight bios, some other thread that's doing something else - + * anything you might want to wait on. + * + * The refcount may be manipulated with closure_get() and closure_put(). + * closure_put() is where many of the interesting things happen, when it causes + * the refcount to go to 0. + * + * Closures can be used to wait on things both synchronously and asynchronously, + * and synchronous and asynchronous use can be mixed without restriction. To + * wait synchronously, use closure_sync() - you will sleep until your closure's + * refcount hits 1. + * + * To wait asynchronously, use + * continue_at(cl, next_function, workqueue); + * + * passing it, as you might expect, the function to run when nothing is pending + * and the workqueue to run that function out of. + * + * continue_at() also, critically, requires a 'return' immediately following the + * location where this macro is referenced, to return to the calling function. + * There's good reason for this. + * + * To safely use closures asynchronously, they must always have a refcount while + * they are running owned by the thread that is running them. Otherwise, suppose + * you submit some bios and wish to have a function run when they all complete: + * + * foo_endio(struct bio *bio) + * { + * closure_put(cl); + * } + * + * closure_init(cl); + * + * do_stuff(); + * closure_get(cl); + * bio1->bi_endio = foo_endio; + * bio_submit(bio1); + * + * do_more_stuff(); + * closure_get(cl); + * bio2->bi_endio = foo_endio; + * bio_submit(bio2); + * + * continue_at(cl, complete_some_read, system_wq); + * + * If the closure's refcount started at 0, complete_some_read() could run before the + * second bio was submitted - which is almost always not what you want! More + * importantly, it wouldn't be possible to say whether the original thread or + * complete_some_read()'s thread owned the closure - and whatever state it was + * associated with! + * + * So, closure_init() initializes a closure's refcount to 1 - and when a + * closure_fn is run, the refcount will be reset to 1 first. + * + * Then, the rule is - if you got the refcount with closure_get(), release it + * with closure_put() (i.e., in a bio->bi_endio function). If you have a refcount + * on a closure because you called closure_init() or you were run out of a + * closure - _always_ use continue_at(). Doing so consistently will help + * eliminate an entire class of particularly pernicious races. + * + * Lastly, you might have a wait list dedicated to a specific event, and have no + * need for specifying the condition - you just want to wait until someone runs + * closure_wake_up() on the appropriate wait list. In that case, just use + * closure_wait(). It will return either true or false, depending on whether the + * closure was already on a wait list or not - a closure can only be on one wait + * list at a time. + * + * Parents: + * + * closure_init() takes two arguments - it takes the closure to initialize, and + * a (possibly null) parent. + * + * If parent is non null, the new closure will have a refcount for its lifetime; + * a closure is considered to be "finished" when its refcount hits 0 and the + * function to run is null. Hence + * + * continue_at(cl, NULL, NULL); + * + * returns up the (spaghetti) stack of closures, precisely like normal return + * returns up the C stack. continue_at() with non null fn is better thought of + * as doing a tail call.
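+ *
+ * An illustrative sketch (hypothetical struct my_op, not part of this
+ * header): a closure function that finishes and "returns" to its parent:
+ *
+ *	CLOSURE_CALLBACK(my_op_done)
+ *	{
+ *		closure_type(op, struct my_op, cl);
+ *
+ *		finish_my_op(op);
+ *		closure_return(cl);
+ *	}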
+ * + * All this implies that a closure should typically be embedded in a particular + * struct (which its refcount will normally control the lifetime of), and that + * struct can very much be thought of as a stack frame. + */ + +struct closure; +struct closure_syncer; +typedef void (closure_fn) (struct work_struct *); +extern struct dentry *bcache_debug; + +struct closure_waitlist { + struct llist_head list; +}; + +enum closure_state { + /* + * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by + * the thread that owns the closure, and cleared by the thread that's + * waking up the closure. + * + * The rest are for debugging and don't affect behaviour: + * + * CLOSURE_RUNNING: Set when a closure is running (i.e. by + * closure_init() and when closure_put() runs the next function), and + * must be cleared before remaining hits 0. Primarily to help guard + * against incorrect usage and accidentally transferring references. + * continue_at() and closure_return() clear it for you, if you're doing + * something unusual you can use closure_set_dead() which also helps + * annotate where references are being transferred. + */ + + CLOSURE_BITS_START = (1U << 26), + CLOSURE_DESTRUCTOR = (1U << 26), + CLOSURE_WAITING = (1U << 28), + CLOSURE_RUNNING = (1U << 30), +}; + +#define CLOSURE_GUARD_MASK \ + ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1) + +#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1) +#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING) + +struct closure { + union { + struct { + struct workqueue_struct *wq; + struct closure_syncer *s; + struct llist_node list; + closure_fn *fn; + }; + struct work_struct work; + }; + + struct closure *parent; + + atomic_t remaining; + bool closure_get_happened; + +#ifdef CONFIG_DEBUG_CLOSURES +#define CLOSURE_MAGIC_DEAD 0xc054dead +#define CLOSURE_MAGIC_ALIVE 0xc054a11e + + unsigned int magic; + struct list_head all; + unsigned long ip; + unsigned long waiting_on; +#endif +}; + +void closure_sub(struct closure *cl, int v); +void closure_put(struct closure *cl); +void __closure_wake_up(struct closure_waitlist *list); +bool closure_wait(struct closure_waitlist *list, struct closure *cl); +void __closure_sync(struct closure *cl); + +static inline unsigned closure_nr_remaining(struct closure *cl) +{ + return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK; +} + +/** + * closure_sync - sleep until a closure has nothing left to wait on + * + * Sleeps until the refcount hits 1 - the thread that's running the closure owns + * the last refcount.
+ */ +static inline void closure_sync(struct closure *cl) +{ +#ifdef CONFIG_DEBUG_CLOSURES + BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened); +#endif + + if (cl->closure_get_happened) + __closure_sync(cl); +} + +#ifdef CONFIG_DEBUG_CLOSURES + +void closure_debug_create(struct closure *cl); +void closure_debug_destroy(struct closure *cl); + +#else + +static inline void closure_debug_create(struct closure *cl) {} +static inline void closure_debug_destroy(struct closure *cl) {} + +#endif + +static inline void closure_set_ip(struct closure *cl) +{ +#ifdef CONFIG_DEBUG_CLOSURES + cl->ip = _THIS_IP_; +#endif +} + +static inline void closure_set_ret_ip(struct closure *cl) +{ +#ifdef CONFIG_DEBUG_CLOSURES + cl->ip = _RET_IP_; +#endif +} + +static inline void closure_set_waiting(struct closure *cl, unsigned long f) +{ +#ifdef CONFIG_DEBUG_CLOSURES + cl->waiting_on = f; +#endif +} + +static inline void closure_set_stopped(struct closure *cl) +{ + atomic_sub(CLOSURE_RUNNING, &cl->remaining); +} + +static inline void set_closure_fn(struct closure *cl, closure_fn *fn, + struct workqueue_struct *wq) +{ + closure_set_ip(cl); + cl->fn = fn; + cl->wq = wq; +} + +static inline void closure_queue(struct closure *cl) +{ + struct workqueue_struct *wq = cl->wq; + /** + * Changes made to closure, work_struct, or a couple of other structs + * may cause work.func to point to the wrong location. + */ + BUILD_BUG_ON(offsetof(struct closure, fn) + != offsetof(struct work_struct, func)); + + if (wq) { + INIT_WORK(&cl->work, cl->work.func); + BUG_ON(!queue_work(wq, &cl->work)); + } else + cl->fn(&cl->work); +} + +/** + * closure_get - increment a closure's refcount + */ +static inline void closure_get(struct closure *cl) +{ + cl->closure_get_happened = true; + +#ifdef CONFIG_DEBUG_CLOSURES + BUG_ON((atomic_inc_return(&cl->remaining) & + CLOSURE_REMAINING_MASK) <= 1); +#else + atomic_inc(&cl->remaining); +#endif +} + +/** + * closure_init - Initialize a closure, setting the refcount to 1 + * @cl: closure to initialize + * @parent: parent of the new closure. cl will take a refcount on it for its + * lifetime; may be NULL. + */ +static inline void closure_init(struct closure *cl, struct closure *parent) +{ + cl->fn = NULL; + cl->parent = parent; + if (parent) + closure_get(parent); + + atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); + cl->closure_get_happened = false; + + closure_debug_create(cl); + closure_set_ip(cl); +} + +static inline void closure_init_stack(struct closure *cl) +{ + memset(cl, 0, sizeof(struct closure)); + atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); +} + +/** + * closure_wake_up - wake up all closures on a wait list, + * with memory barrier + */ +static inline void closure_wake_up(struct closure_waitlist *list) +{ + /* Memory barrier for the wait list */ + smp_mb(); + __closure_wake_up(list); +} + +#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws) +#define closure_type(name, type, member) \ + struct closure *cl = container_of(ws, struct closure, work); \ + type *name = container_of(cl, type, member) + +/** + * continue_at - jump to another function with barrier + * + * After @cl is no longer waiting on anything (i.e. all outstanding refs have + * been dropped with closure_put()), it will resume execution at @fn running out + * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ * + * This is because after calling continue_at() you no longer have a ref on @cl, + * and whatever @cl owns may be freed out from under you - a running closure fn + * has a ref on its own closure which continue_at() drops. + * + * Note you are expected to immediately return after using this macro. + */ +#define continue_at(_cl, _fn, _wq) \ +do { \ + set_closure_fn(_cl, _fn, _wq); \ + closure_sub(_cl, CLOSURE_RUNNING + 1); \ +} while (0) + +/** + * closure_return - finish execution of a closure + * + * This is used to indicate that @cl is finished: when all outstanding refs on + * @cl have been dropped @cl's ref on its parent closure (as passed to + * closure_init()) will be dropped, if one was specified - thus this can be + * thought of as returning to the parent closure. + */ +#define closure_return(_cl) continue_at((_cl), NULL, NULL) + +/** + * continue_at_nobarrier - jump to another function without barrier + * + * Causes @fn to be executed out of @cl, in @wq context (or called directly if + * @wq is NULL). + * + * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn, + * thus it's not safe to touch anything protected by @cl after a + * continue_at_nobarrier(). + */ +#define continue_at_nobarrier(_cl, _fn, _wq) \ +do { \ + set_closure_fn(_cl, _fn, _wq); \ + closure_queue(_cl); \ +} while (0) + +/** + * closure_return_with_destructor - finish execution of a closure, + * with destructor + * + * Works like closure_return(), except @destructor will be called when all + * outstanding refs on @cl have been dropped; @destructor may be used to safely + * free the memory occupied by @cl, and it is called with the ref on the parent + * closure still held - so @destructor could safely return an item to a + * freelist protected by @cl's parent. + */ +#define closure_return_with_destructor(_cl, _destructor) \ +do { \ + set_closure_fn(_cl, _destructor, NULL); \ + closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \ +} while (0) + +/** + * closure_call - execute @fn out of a new, uninitialized closure + * + * Typically used when running out of one closure, and we want to run @fn + * asynchronously out of a new closure - @parent will then wait for @cl to + * finish. 
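Putting these macros together, a sketch of the intended state-machine pattern (struct my_op, both stage functions, and my_wq are invented for illustration): the closure is embedded in the operation's struct, each stage ends with continue_at(), and the final stage ends with closure_return().

static struct workqueue_struct *my_wq;	/* hypothetical */

struct my_op {
	struct closure	cl;
	/* ... per-operation state ... */
};

static CLOSURE_CALLBACK(my_op_finish)
{
	closure_type(op, struct my_op, cl);

	/* ... final stage; op may be freed once the last ref is dropped ... */
	closure_return(&op->cl);
}

static CLOSURE_CALLBACK(my_op_start)
{
	closure_type(op, struct my_op, cl);

	/* ... kick off async work that takes refs on &op->cl ... */
	continue_at(&op->cl, my_op_finish, my_wq);
	/* must return immediately: we no longer hold a ref on op */
}

/* From a parent closure: closure_call(&op->cl, my_op_start, my_wq, parent); */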
+ */ +static inline void closure_call(struct closure *cl, closure_fn fn, + struct workqueue_struct *wq, + struct closure *parent) +{ + closure_init(cl, parent); + continue_at_nobarrier(cl, fn, wq); +} + +#define __closure_wait_event(waitlist, _cond) \ +do { \ + struct closure cl; \ + \ + closure_init_stack(&cl); \ + \ + while (1) { \ + closure_wait(waitlist, &cl); \ + if (_cond) \ + break; \ + closure_sync(&cl); \ + } \ + closure_wake_up(waitlist); \ + closure_sync(&cl); \ +} while (0) + +#define closure_wait_event(waitlist, _cond) \ +do { \ + if (!(_cond)) \ + __closure_wait_event(waitlist, _cond); \ +} while (0) + +#endif /* _LINUX_CLOSURE_H */ diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h index d8264417e53c..d527f04400df 100644 --- a/include/linux/comedi/comedi_8254.h +++ b/include/linux/comedi/comedi_8254.h @@ -12,6 +12,8 @@ #define _COMEDI_8254_H #include <linux/types.h> +#include <linux/errno.h> +#include <linux/err.h> struct comedi_device; struct comedi_insn; @@ -57,10 +59,24 @@ struct comedi_subdevice; /* counter maps zero to 0x10000 */ #define I8254_MAX_COUNT 0x10000 +struct comedi_8254; + +/** + * typedef comedi_8254_iocb_fn - call-back function type for 8254 register access + * @i8254: pointer to struct comedi_8254 + * @dir: direction (0 = read, 1 = write) + * @reg: register number + * @val: value to write + * + * Return: Register value when reading, 0 when writing. + */ +typedef unsigned int comedi_8254_iocb_fn(struct comedi_8254 *i8254, int dir, + unsigned int reg, unsigned int val); + /** * struct comedi_8254 - private data used by this module - * @iobase: PIO base address of the registers (in/out) - * @mmio: MMIO base address of the registers (read/write) + * @iocb: I/O call-back function for register access + * @context: context for register access (e.g. 
a base address) * @iosize: I/O size used to access the registers (b/w/l) * @regshift: register gap shift * @osc_base: cascaded oscillator speed in ns @@ -76,8 +92,8 @@ struct comedi_subdevice; * @insn_config: driver specific (*insn_config) callback */ struct comedi_8254 { - unsigned long iobase; - void __iomem *mmio; + comedi_8254_iocb_fn *iocb; + unsigned long context; unsigned int iosize; unsigned int regshift; unsigned int osc_base; @@ -122,13 +138,24 @@ void comedi_8254_set_busy(struct comedi_8254 *i8254, void comedi_8254_subdevice_init(struct comedi_subdevice *s, struct comedi_8254 *i8254); -struct comedi_8254 *comedi_8254_init(unsigned long iobase, - unsigned int osc_base, - unsigned int iosize, - unsigned int regshift); -struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio, - unsigned int osc_base, - unsigned int iosize, - unsigned int regshift); +#ifdef CONFIG_HAS_IOPORT +struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase, + unsigned int osc_base, + unsigned int iosize, + unsigned int regshift); +#else +static inline struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase, + unsigned int osc_base, + unsigned int iosize, + unsigned int regshift) +{ + return ERR_PTR(-ENXIO); +} +#endif + +struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio, + unsigned int osc_base, + unsigned int iosize, + unsigned int regshift); #endif /* _COMEDI_8254_H */ diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h index b2a5bc6b3a49..d24a69da389b 100644 --- a/include/linux/comedi/comedi_8255.h +++ b/include/linux/comedi/comedi_8255.h @@ -10,6 +10,8 @@ #ifndef _COMEDI_8255_H #define _COMEDI_8255_H +#include <linux/errno.h> + #define I8255_SIZE 0x04 #define I8255_DATA_A_REG 0x00 @@ -27,16 +29,26 @@ struct comedi_device; struct comedi_subdevice; -int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s, - int (*io)(struct comedi_device *dev, int dir, int port, - int data, unsigned long regbase), - unsigned long regbase); +#ifdef CONFIG_HAS_IOPORT +int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s, + unsigned long regbase); +#else +static inline int subdev_8255_io_init(struct comedi_device *dev, + struct comedi_subdevice *s, + unsigned long regbase) +{ + return -ENXIO; +} +#endif int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s, - int (*io)(struct comedi_device *dev, int dir, int port, - int data, unsigned long regbase), unsigned long regbase); +int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s, + int (*io)(struct comedi_device *dev, int dir, int port, + int data, unsigned long context), + unsigned long context); + unsigned long subdev_8255_regbase(struct comedi_subdevice *s); #endif diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h index 0a1150900ef3..c08416a7364b 100644 --- a/include/linux/comedi/comedidev.h +++ b/include/linux/comedi/comedidev.h @@ -633,7 +633,7 @@ extern const struct comedi_lrange range_unknown; */ struct comedi_lrange { int length; - struct comedi_krange range[]; + struct comedi_krange range[] __counted_by(length); }; /** diff --git a/include/linux/compat.h b/include/linux/compat.h index 1cfa4f0f490a..233f61ec8afc 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -581,7 +581,6 @@ asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id, struct io_event __user *events, struct __kernel_timespec __user *timeout, const struct __compat_aio_sigset __user *usig); 
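Returning to the comedi_8254_iocb_fn call-back introduced in comedi_8254.h above: it replaces the old iobase/mmio pair with a single register accessor supplied by the driver. A sketch of what an MMIO-backed implementation might look like (the function name is invented; @context is assumed to carry the __iomem base address, and byte-wide registers are assumed):

static unsigned int my_8254_mmio_iocb(struct comedi_8254 *i8254, int dir,
				      unsigned int reg, unsigned int val)
{
	void __iomem *base = (void __iomem *)i8254->context;
	unsigned int offset = reg << i8254->regshift;

	if (dir) {			/* dir == 1: write */
		writeb(val, base + offset);
		return 0;
	}
	return readb(base + offset);	/* dir == 0: read */
}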
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); asmlinkage long compat_sys_epoll_pwait(int epfd, struct epoll_event __user *events, int maxevents, int timeout, diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 9b673fefcef8..ddab1ef22bee 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -14,11 +14,6 @@ #undef __cleanup #define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func))) -/* same as gcc, this was present in clang-2.6 so we can assume it works - * with any version that can compile the kernel - */ -#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) - /* all clang versions usable with the kernel support KASAN ABI version 5 */ #define KASAN_ABI_VERSION 5 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 7af9e34ec261..c1a963be7d28 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -39,8 +39,6 @@ #define __noretpoline __attribute__((__indirect_branch__("keep"))) #endif -#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) - #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) #define __latent_entropy __attribute__((latent_entropy)) #endif @@ -66,6 +64,25 @@ __builtin_unreachable(); \ } while (0) +/* + * GCC 'asm goto' with outputs miscompiles certain code sequences: + * + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110420 + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110422 + * + * Work it around via the same compiler barrier quirk that we used + * to use for the old 'asm goto' workaround. + * + * Also, always mark such 'asm goto' statements as volatile: all + * asm goto statements are supposed to be volatile as per the + * documentation, but some versions of gcc didn't actually do + * that for asms with outputs: + * + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619 + */ +#define asm_goto_output(x...) \ + do { asm volatile goto(x); asm (""); } while (0) + #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ @@ -138,7 +155,7 @@ #endif #define __diag_ignore_all(option, comment) \ - __diag_GCC(8, ignore, option) + __diag(__diag_GCC_ignore option) /* * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size" diff --git a/include/linux/compiler.h b/include/linux/compiler.h index bf9815eaf4aa..cdcdaa48b4d2 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -177,10 +177,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, __asm__ ("" : "=r" (var) : "0" (var)) #endif -/* Not-quite-unique ID. */ -#ifndef __UNIQUE_ID -# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) -#endif +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) /** * data_race - mark an expression as containing intentional data races @@ -231,6 +228,14 @@ static inline void *offset_to_ptr(const int *off) #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) /* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) + +/* * Whether 'type' is a signed type or an unsigned type. Supports scalar types, * bool and also pointer types. 
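The __is_constexpr() macro relocated into compiler.h above is worth a worked example: for a constant x, (long)(x) * 0l is a null pointer constant, so the conditional expression has type int * and sizeof(*...) equals sizeof(int); for a non-constant x the result type is void * and sizeof(*...) is 1 under the GNU sizeof(void) extension. A compile-time sketch (the array names are invented), which never evaluates its argument:

/* 42 is a constant expression: __is_constexpr(42) == 1, so the size is 1. */
static char is_const[__is_constexpr(42) ? 1 : -1];

/* An extern lvalue is not a constant expression: the size is again 1. */
extern int some_global;
static char not_const[__is_constexpr(some_global) ? -1 : 1];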
*/ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index c523c6683789..663d8791c871 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -2,6 +2,15 @@ #ifndef __LINUX_COMPILER_TYPES_H #define __LINUX_COMPILER_TYPES_H +/* + * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21. + * In the meantime, to support gcc < 10, we implement __has_builtin + * by hand. + */ +#ifndef __has_builtin +#define __has_builtin(x) (0) +#endif + #ifndef __ASSEMBLY__ /* @@ -134,17 +143,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __preserve_most #endif -/* Builtins */ - -/* - * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21. - * In the meantime, to support gcc < 10, we implement __has_builtin - * by hand. - */ -#ifndef __has_builtin -#define __has_builtin(x) (0) -#endif - /* Compiler specific macros. */ #ifdef __clang__ #include <linux/compiler-clang.h> @@ -352,8 +350,20 @@ struct ftrace_likely_data { # define __realloc_size(x, ...) #endif -#ifndef asm_volatile_goto -#define asm_volatile_goto(x...) asm goto(x) +/* + * When the size of an allocated object is needed, use the best available + * mechanism to find it. (For cases where sizeof() cannot be used.) + */ +#if __has_builtin(__builtin_dynamic_object_size) +#define __struct_size(p) __builtin_dynamic_object_size(p, 0) +#define __member_size(p) __builtin_dynamic_object_size(p, 1) +#else +#define __struct_size(p) __builtin_object_size(p, 0) +#define __member_size(p) __builtin_object_size(p, 1) +#endif + +#ifndef asm_goto_output +#define asm_goto_output(x...) asm goto(x) #endif #ifdef CONFIG_CC_HAS_ASM_INLINE diff --git a/include/linux/connector.h b/include/linux/connector.h index cec2d99ae902..70bc1160f3d8 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -100,8 +100,7 @@ void cn_del_callback(const struct cb_id *id); */ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask, - int (*filter)(struct sock *dsk, struct sk_buff *skb, - void *data), + netlink_filter_fn filter, void *filter_data); /** diff --git a/include/linux/console.h b/include/linux/console.h index d3195664baa5..779d388af8a0 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -101,6 +101,13 @@ extern const struct consw dummy_con; /* dummy console buffer */ extern const struct consw vga_con; /* VGA text console */ extern const struct consw newport_con; /* SGI Newport console */ +struct screen_info; +#ifdef CONFIG_VGA_CONSOLE +void vgacon_register_screen(struct screen_info *si); +#else +static inline void vgacon_register_screen(struct screen_info *si) { } +#endif + int con_is_bound(const struct consw *csw); int do_unregister_con_driver(const struct consw *csw); int do_take_over_console(const struct consw *sw, int first, int last, int deflt); @@ -154,6 +161,10 @@ static inline int con_debug_leave(void) * receiving the printk spam for obvious reasons. * @CON_EXTENDED: The console supports the extended output format of * /dev/kmesg which requires a larger output buffer. + * @CON_SUSPENDED: Indicates if a console is suspended. If true, the + * printing callbacks must not be called. + * @CON_NBCON: Console can operate outside of the legacy style console_lock + * constraints. 
*/ enum cons_flags { CON_PRINTBUFFER = BIT(0), @@ -163,6 +174,112 @@ enum cons_flags { CON_ANYTIME = BIT(4), CON_BRL = BIT(5), CON_EXTENDED = BIT(6), + CON_SUSPENDED = BIT(7), + CON_NBCON = BIT(8), +}; + +/** + * struct nbcon_state - console state for nbcon consoles + * @atom: Compound of the state fields for atomic operations + * + * @req_prio: The priority of a handover request + * @prio: The priority of the current owner + * @unsafe: Console is busy in a non-takeover region + * @unsafe_takeover: A hostile takeover in an unsafe state happened in the + * past. The console cannot be safe until re-initialized. + * @cpu: The CPU on which the owner runs + * + * To be used for reading and preparing of the value stored in the nbcon + * state variable @console::nbcon_state. + * + * The @prio and @req_prio fields are particularly important to allow + * spin-waiting to time out and give up without the risk of a waiter being + * assigned the lock after giving up. + */ +struct nbcon_state { + union { + unsigned int atom; + struct { + unsigned int prio : 2; + unsigned int req_prio : 2; + unsigned int unsafe : 1; + unsigned int unsafe_takeover : 1; + unsigned int cpu : 24; + }; + }; +}; + +/* + * The nbcon_state struct is used to easily create and interpret values that + * are stored in the @console::nbcon_state variable. Ensure this struct stays + * within the size boundaries of the atomic variable's underlying type in + * order to avoid any accidental truncation. + */ +static_assert(sizeof(struct nbcon_state) <= sizeof(int)); + +/** + * nbcon_prio - console owner priority for nbcon consoles + * @NBCON_PRIO_NONE: Unused + * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage + * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...) + * @NBCON_PRIO_PANIC: Panic output + * @NBCON_PRIO_MAX: The number of priority levels + * + * A higher priority context can take over the console when it is + * in the safe state. The final attempt to flush consoles in panic() + * can be allowed to do so even in an unsafe state (Hope and pray). + */ +enum nbcon_prio { + NBCON_PRIO_NONE = 0, + NBCON_PRIO_NORMAL, + NBCON_PRIO_EMERGENCY, + NBCON_PRIO_PANIC, + NBCON_PRIO_MAX, +}; + +struct console; +struct printk_buffers; + +/** + * struct nbcon_context - Context for console acquire/release + * @console: The associated console + * @spinwait_max_us: Limit for spin-wait acquire + * @prio: Priority of the context + * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can + * be used only with NBCON_PRIO_PANIC @prio. It + * might cause a system freeze when the console + * is used later.
+ * @backlog: Ringbuffer has pending records + * @pbufs: Pointer to the text buffer for this context + * @seq: The sequence number to print for this context + */ +struct nbcon_context { + /* members set by caller */ + struct console *console; + unsigned int spinwait_max_us; + enum nbcon_prio prio; + unsigned int allow_unsafe_takeover : 1; + + /* members set by emit */ + unsigned int backlog : 1; + + /* members set by acquire */ + struct printk_buffers *pbufs; + u64 seq; +}; + +/** + * struct nbcon_write_context - Context handed to the nbcon write callbacks + * @ctxt: The core console context + * @outbuf: Pointer to the text buffer for output + * @len: Length to write + * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred + */ +struct nbcon_write_context { + struct nbcon_context __private ctxt; + char *outbuf; + unsigned int len; + bool unsafe_takeover; }; /** @@ -184,6 +301,11 @@ enum cons_flags { * @dropped: Number of unreported dropped ringbuffer records * @data: Driver private data * @node: hlist node for the console list + * + * @write_atomic: Write callback for atomic context + * @nbcon_state: State for nbcon consoles + * @nbcon_seq: Sequence number of the next record for nbcon to print + * @pbufs: Pointer to nbcon private buffer */ struct console { char name[16]; @@ -203,6 +325,13 @@ struct console { unsigned long dropped; void *data; struct hlist_node node; + + /* nbcon console specific members */ + bool (*write_atomic)(struct console *con, + struct nbcon_write_context *wctxt); + atomic_t __private nbcon_state; + atomic_long_t __private nbcon_seq; + struct printk_buffers *pbufs; }; #ifdef CONFIG_LOCKDEP @@ -329,6 +458,16 @@ static inline bool console_is_registered(const struct console *con) lockdep_assert_console_list_lock_held(); \ hlist_for_each_entry(con, &console_list, node) +#ifdef CONFIG_PRINTK +extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt); +extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt); +extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt); +#else +static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; } +static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; } +static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; } +#endif + extern int console_set_on_cmdline; extern struct console *early_console; @@ -337,7 +476,7 @@ enum con_flush_mode { CONSOLE_REPLAY_ALL, }; -extern int add_preferred_console(char *name, int idx, char *options); +extern int add_preferred_console(const char *name, const short idx, char *options); extern void console_force_preferred_locked(struct console *con); extern void register_console(struct console *); extern int unregister_console(struct console *); diff --git a/include/linux/const.h b/include/linux/const.h index 435ddd72d2c4..81b8aae5a855 100644 --- a/include/linux/const.h +++ b/include/linux/const.h @@ -3,12 +3,4 @@ #include <vdso/const.h> -/* - * This returns a constant expression while determining if an argument is - * a constant expression, most importantly without evaluating the argument. - * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> - */ -#define __is_constexpr(x) \ - (sizeof(int) == sizeof(*(8 ? 
((void *)((long)(x) * 0l)) : (int *)8))) - #endif /* _LINUX_CONST_H */ diff --git a/include/linux/container.h b/include/linux/container.h index 2566a1baa736..dd00cc918a92 100644 --- a/include/linux/container.h +++ b/include/linux/container.h @@ -12,7 +12,7 @@ #include <linux/device.h> /* drivers/base/power/container.c */ -extern struct bus_type container_subsys; +extern const struct bus_type container_subsys; struct container_dev { struct device dev; diff --git a/include/linux/coresight.h b/include/linux/coresight.h index a269fffaf991..a4cb7dd6ca23 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -64,6 +64,7 @@ enum coresight_dev_subtype_source { CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, CORESIGHT_DEV_SUBTYPE_SOURCE_BUS, CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, + CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM, CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS, }; diff --git a/include/linux/cper.h b/include/linux/cper.h index c1a7dc325121..265b0f8fc0b3 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -90,6 +90,29 @@ enum { GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \ 0x72, 0x2D, 0xEB, 0x41) +/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */ +/* + * General Media Event Record + * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 + */ +#define CPER_SEC_CXL_GEN_MEDIA_GUID \ + GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \ + 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6) +/* + * DRAM Event Record + * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 + */ +#define CPER_SEC_CXL_DRAM_GUID \ + GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \ + 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24) +/* + * Memory Module Event Record + * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45 + */ +#define CPER_SEC_CXL_MEM_MODULE_GUID \ + GUID_INIT(0xfe927475, 0xdd59, 0x4339, \ + 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74) + /* * Flags bits definitions for flags in struct cper_record_header * If set, the error has been recovered diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 0abd60a7987b..dcb89c987164 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -80,12 +80,19 @@ extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, const struct attribute_group **groups, const char *fmt, ...); +extern bool arch_cpu_is_hotpluggable(int cpu); +extern int arch_register_cpu(int cpu); +extern void arch_unregister_cpu(int cpu); #ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu(struct cpu *cpu); extern ssize_t arch_cpu_probe(const char *, size_t); extern ssize_t arch_cpu_release(const char *, size_t); #endif +#ifdef CONFIG_GENERIC_CPU_DEVICES +DECLARE_PER_CPU(struct cpu, cpu_devices); +#endif + /* * These states are not related to the core CPU hotplug mechanism. They are * used by various (sub)architectures to track internal state @@ -153,6 +160,8 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; } static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } #endif /* !CONFIG_HOTPLUG_CPU */ +DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock()) + #ifdef CONFIG_PM_SLEEP_SMP extern int freeze_secondary_cpus(int primary); extern void thaw_secondary_cpus(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 43b363a99215..afda5f24d3dd 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -141,6 +141,9 @@ struct cpufreq_policy { */ bool dvfs_possible_from_any_cpu; + /* Per policy boost enabled flag. 
*/ + bool boost_enabled; + /* Cached frequency lookup from cpufreq_driver_resolve_freq. */ unsigned int cached_target_freq; unsigned int cached_resolved_idx; @@ -1190,14 +1193,6 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_ } #endif -#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) -void sched_cpufreq_governor_change(struct cpufreq_policy *policy, - struct cpufreq_governor *old_gov); -#else -static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy, - struct cpufreq_governor *old_gov) { } -#endif - extern unsigned int arch_freq_get_on_cpu(int cpu); #ifndef arch_set_freq_scale @@ -1208,6 +1203,7 @@ void arch_set_freq_scale(const struct cpumask *cpus, { } #endif + /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 06dda85f0424..172d0a743e5d 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -66,15 +66,12 @@ enum cpuhp_state { CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, CPUHP_X86_HPET_DEAD, - CPUHP_X86_APB_DEAD, CPUHP_X86_MCE_DEAD, CPUHP_VIRT_NET_DEAD, CPUHP_IBMVNIC_DEAD, CPUHP_SLUB_DEAD, CPUHP_DEBUG_OBJ_DEAD, CPUHP_MM_WRITEBACK_DEAD, - /* Must be after CPUHP_MM_VMSTAT_DEAD */ - CPUHP_MM_DEMOTION_DEAD, CPUHP_MM_VMSTAT_DEAD, CPUHP_SOFTIRQ_DEAD, CPUHP_NET_MVNETA_DEAD, @@ -90,14 +87,12 @@ enum cpuhp_state { CPUHP_FS_BUFF_DEAD, CPUHP_PRINTK_DEAD, CPUHP_MM_MEMCQ_DEAD, - CPUHP_XFS_DEAD, CPUHP_PERCPU_CNT_DEAD, CPUHP_RADIX_DEAD, CPUHP_PAGE_ALLOC, CPUHP_NET_DEV_DEAD, CPUHP_PCI_XGENE_DEAD, CPUHP_IOMMU_IOVA_DEAD, - CPUHP_LUSTRE_CFS_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, CPUHP_AP_DTPM_CPU_DEAD, @@ -109,7 +104,6 @@ enum cpuhp_state { CPUHP_X2APIC_PREPARE, CPUHP_SMPCFD_PREPARE, CPUHP_RELAY_PREPARE, - CPUHP_SLAB_PREPARE, CPUHP_MD_RAID5_PREPARE, CPUHP_RCUTREE_PREP, CPUHP_CPUIDLE_COUPLED_PREPARE, @@ -119,13 +113,11 @@ enum cpuhp_state { CPUHP_XEN_EVTCHN_PREPARE, CPUHP_ARM_SHMOBILE_SCU_PREPARE, CPUHP_SH_SH3X_PREPARE, - CPUHP_NET_FLOW_PREPARE, CPUHP_TOPOLOGY_PREPARE, CPUHP_NET_IUCV_PREPARE, CPUHP_ARM_BL_PREPARE, CPUHP_TRACE_RB_PREPARE, CPUHP_MM_ZS_PREPARE, - CPUHP_MM_ZSWP_MEM_PREPARE, CPUHP_MM_ZSWP_POOL_PREPARE, CPUHP_KVM_PPC_BOOK3S_PREPARE, CPUHP_ZCOMP_PREPARE, @@ -152,18 +144,14 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, - CPUHP_AP_IRQ_RISCV_STARTING, CPUHP_AP_IRQ_LOONGARCH_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, - CPUHP_AP_MICROCODE_LOADER, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, - CPUHP_AP_PERF_X86_CQM_STARTING, CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, - CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, @@ -173,13 +161,13 @@ enum cpuhp_state { CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, + CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_JCORE_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_TEGRA_TIMER_STARTING, CPUHP_AP_ARMADA_TIMER_STARTING, - CPUHP_AP_MARCO_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_ARC_TIMER_STARTING, CPUHP_AP_RISCV_TIMER_STARTING, @@ -190,10 
+178,12 @@ enum cpuhp_state { /* Must be the last timer callback */ CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, + CPUHP_AP_ARM_XEN_RUNSTATE_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, + CPUHP_AP_HRTIMERS_DYING, CPUHP_AP_X86_TBOOT_DYING, CPUHP_AP_ARM_CACHE_B15_RAC_DYING, CPUHP_AP_ONLINE, @@ -205,7 +195,6 @@ enum cpuhp_state { CPUHP_AP_KVM_ONLINE, CPUHP_AP_SCHED_WAIT_EMPTY, CPUHP_AP_SMPBOOT_THREADS, - CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, CPUHP_AP_BLK_MQ_ONLINE, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, @@ -216,9 +205,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, CPUHP_AP_PERF_X86_RAPL_ONLINE, - CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, - CPUHP_AP_PERF_X86_IDXD_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, @@ -250,9 +237,7 @@ enum cpuhp_state { CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_BASE_CACHEINFO_ONLINE, CPUHP_AP_ONLINE_DYN, - CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, - /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */ - CPUHP_AP_MM_DEMOTION_ONLINE, + CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40, CPUHP_AP_X86_HPET_ONLINE, CPUHP_AP_X86_KVM_CLK_ONLINE, CPUHP_AP_ACTIVE, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index f10fb87d49db..cfb545841a2c 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -4,7 +4,7 @@ /* * Cpumasks provide a bitmap suitable for representing the - * set of CPU's in a system, one bit position per CPU number. In general, + * set of CPUs in a system, one bit position per CPU number. In general, * only nr_cpu_ids (<= NR_CPUS) bits are valid. */ #include <linux/kernel.h> @@ -97,7 +97,7 @@ static inline void set_nr_cpu_ids(unsigned int nr) * * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. * - * The cpu_possible_mask is fixed at boot time, as the set of CPU id's + * The cpu_possible_mask is fixed at boot time, as the set of CPU IDs * that it is possible might ever be plugged in at anytime during the * life of that system boot. The cpu_present_mask is dynamic(*), * representing which CPUs are currently plugged in. And @@ -112,7 +112,7 @@ static inline void set_nr_cpu_ids(unsigned int nr) * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. * * Subtleties: - * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode + * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode * assumption that their single CPU is online. The UP * cpu_{online,possible,present}_masks are placebos. Changing them * will have no useful effect on the following num_*_cpus() @@ -155,7 +155,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu) * cpumask_first - get the first cpu in a cpumask * @srcp: the cpumask pointer * - * Returns >= nr_cpu_ids if no cpus set. + * Return: >= nr_cpu_ids if no cpus set. */ static inline unsigned int cpumask_first(const struct cpumask *srcp) { @@ -166,7 +166,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp) * cpumask_first_zero - get the first unset cpu in a cpumask * @srcp: the cpumask pointer * - * Returns >= nr_cpu_ids if all cpus are set. + * Return: >= nr_cpu_ids if all cpus are set.
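As a usage note for the lookup helpers whose kernel-doc is being converted here, a small sketch (the claimed-mask bookkeeping is invented) showing the ">= nr_cpu_ids means not found" convention:

/* Claim the lowest-numbered CPU not yet in *claimed. */
static unsigned int claim_next_cpu(struct cpumask *claimed)
{
	unsigned int cpu = cpumask_first_zero(claimed);

	if (cpu < nr_cpu_ids)
		cpumask_set_cpu(cpu, claimed);
	return cpu;		/* >= nr_cpu_ids: every CPU already claimed */
}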
*/ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) { @@ -178,7 +178,7 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) * @srcp1: the first input * @srcp2: the second input * - * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). + * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). */ static inline unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2) @@ -190,7 +190,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask * cpumask_last - get the last CPU in a cpumask * @srcp: - the cpumask pointer * - * Returns >= nr_cpumask_bits if no CPUs set. + * Return: >= nr_cpumask_bits if no CPUs set. */ static inline unsigned int cpumask_last(const struct cpumask *srcp) { @@ -199,10 +199,10 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp) /** * cpumask_next - get the next cpu in a cpumask - * @n: the cpu prior to the place to search (ie. return will be > @n) + * @n: the cpu prior to the place to search (i.e. return will be > @n) * @srcp: the cpumask pointer * - * Returns >= nr_cpu_ids if no further cpus set. + * Return: >= nr_cpu_ids if no further cpus set. */ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) @@ -215,10 +215,10 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp) /** * cpumask_next_zero - get the next unset cpu in a cpumask - * @n: the cpu prior to the place to search (ie. return will be > @n) + * @n: the cpu prior to the place to search (i.e. return will be > @n) * @srcp: the cpumask pointer * - * Returns >= nr_cpu_ids if no further cpus unset. + * Return: >= nr_cpu_ids if no further cpus unset. */ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) { @@ -254,11 +254,11 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp); /** * cpumask_next_and - get the next cpu in *src1p & *src2p - * @n: the cpu prior to the place to search (ie. return will be > @n) + * @n: the cpu prior to the place to search (i.e. return will be > @n) * @src1p: the first cpumask pointer * @src2p: the second cpumask pointer * - * Returns >= nr_cpu_ids if no further cpus set in both. + * Return: >= nr_cpu_ids if no further cpus set in both. */ static inline unsigned int cpumask_next_and(int n, const struct cpumask *src1p, @@ -373,7 +373,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta * @cpu: the cpu to ignore. * * Often used to find any cpu but smp_processor_id() in a mask. - * Returns >= nr_cpu_ids if no cpus set. + * Return: >= nr_cpu_ids if no cpus set. */ static inline unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) @@ -388,11 +388,11 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) } /** - * cpumask_nth - get the first cpu in a cpumask + * cpumask_nth - get the Nth cpu in a cpumask * @srcp: the cpumask pointer - * @cpu: the N'th cpu to find, starting from 0 + * @cpu: the Nth cpu to find, starting from 0 * - * Returns >= nr_cpu_ids if such cpu doesn't exist. + * Return: >= nr_cpu_ids if such cpu doesn't exist. 
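In the same vein, a sketch of typical use of the Nth-CPU helpers documented below (the helper name is invented): spreading work by picking the Nth online CPU, falling back to the first online CPU when fewer than N+1 are online.

static unsigned int nth_online_cpu(unsigned int n)
{
	unsigned int cpu = cpumask_nth(n, cpu_online_mask);

	return cpu < nr_cpu_ids ? cpu : cpumask_first(cpu_online_mask);
}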
*/ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp) { @@ -400,12 +400,12 @@ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *s } /** - * cpumask_nth_and - get the first cpu in 2 cpumasks + * cpumask_nth_and - get the Nth cpu in 2 cpumasks * @srcp1: the cpumask pointer * @srcp2: the cpumask pointer - * @cpu: the N'th cpu to find, starting from 0 + * @cpu: the Nth cpu to find, starting from 0 * - * Returns >= nr_cpu_ids if such cpu doesn't exist. + * Return: >= nr_cpu_ids if such cpu doesn't exist. */ static inline unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1, @@ -416,12 +416,12 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1, } /** - * cpumask_nth_andnot - get the first cpu set in 1st cpumask, and clear in 2nd. + * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd. * @srcp1: the cpumask pointer * @srcp2: the cpumask pointer - * @cpu: the N'th cpu to find, starting from 0 + * @cpu: the Nth cpu to find, starting from 0 * - * Returns >= nr_cpu_ids if such cpu doesn't exist. + * Return: >= nr_cpu_ids if such cpu doesn't exist. */ static inline unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1, @@ -436,9 +436,9 @@ unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1, * @srcp1: the cpumask pointer * @srcp2: the cpumask pointer * @srcp3: the cpumask pointer - * @cpu: the N'th cpu to find, starting from 0 + * @cpu: the Nth cpu to find, starting from 0 * - * Returns >= nr_cpu_ids if such cpu doesn't exist. + * Return: >= nr_cpu_ids if such cpu doesn't exist. */ static __always_inline unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1, @@ -497,7 +497,7 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns true if @cpu is set in @cpumask, else returns false + * Return: true if @cpu is set in @cpumask, else returns false */ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask) { @@ -509,9 +509,9 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns true if @cpu is set in old bitmap of @cpumask, else returns false - * * test_and_set_bit wrapper for cpumasks. + * + * Return: true if @cpu is set in old bitmap of @cpumask, else returns false */ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) { @@ -523,9 +523,9 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns true if @cpu is set in old bitmap of @cpumask, else returns false - * * test_and_clear_bit wrapper for cpumasks. 
+ * + * Return: true if @cpu is set in old bitmap of @cpumask, else returns false */ static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) { @@ -560,7 +560,7 @@ static inline void cpumask_clear(struct cpumask *dstp) * @src1p: the first input * @src2p: the second input * - * If *@dstp is empty, returns false, else returns true + * Return: false if *@dstp is empty, else returns true */ static inline bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, @@ -603,7 +603,7 @@ static inline void cpumask_xor(struct cpumask *dstp, * @src1p: the first input * @src2p: the second input * - * If *@dstp is empty, returns false, else returns true + * Return: false if *@dstp is empty, else returns true */ static inline bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, @@ -617,6 +617,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp, * cpumask_equal - *src1p == *src2p * @src1p: the first input * @src2p: the second input + * + * Return: true if the cpumasks are equal, false if not */ static inline bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) @@ -630,6 +632,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p, * @src1p: the first input * @src2p: the second input * @src3p: the third input + * + * Return: true if first cpumask ORed with second cpumask == third cpumask, + * otherwise false */ static inline bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p, @@ -643,6 +648,9 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p, * cpumask_intersects - (*src1p & *src2p) != 0 * @src1p: the first input * @src2p: the second input + * + * Return: true if first cpumask ANDed with second cpumask is non-empty, + * otherwise false */ static inline bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) @@ -656,7 +664,7 @@ static inline bool cpumask_intersects(const struct cpumask *src1p, * @src1p: the first input * @src2p: the second input * - * Returns true if *@src1p is a subset of *@src2p, else returns false + * Return: true if *@src1p is a subset of *@src2p, else returns false */ static inline bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) @@ -668,6 +676,8 @@ static inline bool cpumask_subset(const struct cpumask *src1p, /** * cpumask_empty - *srcp == 0 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are clear. + * + * Return: true if srcp is empty (has no bits set), else false */ static inline bool cpumask_empty(const struct cpumask *srcp) { @@ -677,6 +687,8 @@ static inline bool cpumask_empty(const struct cpumask *srcp) /** * cpumask_full - *srcp == 0xFFFFFFFF... * @srcp: the cpumask to check that all cpus < nr_cpu_ids are set. + * + * Return: true if srcp is full (has all bits set), else false */ static inline bool cpumask_full(const struct cpumask *srcp) { @@ -686,6 +698,8 @@ static inline bool cpumask_full(const struct cpumask *srcp) /** * cpumask_weight - Count of bits in *srcp * @srcp: the cpumask to count bits (< nr_cpu_ids) in. + * + * Return: count of bits set in *srcp */ static inline unsigned int cpumask_weight(const struct cpumask *srcp) { @@ -696,6 +710,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp) * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2) * @srcp1: the cpumask to count bits (< nr_cpu_ids) in. * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ * + * Return: count of bits set in both *srcp1 and *srcp2 */ static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2) @@ -744,7 +760,7 @@ static inline void cpumask_copy(struct cpumask *dstp, * cpumask_any - pick a "random" cpu from *srcp * @srcp: the input cpumask * - * Returns >= nr_cpu_ids if no cpus set. + * Return: >= nr_cpu_ids if no cpus set. */ #define cpumask_any(srcp) cpumask_first(srcp) @@ -753,7 +769,7 @@ static inline void cpumask_copy(struct cpumask *dstp, * @mask1: the first input cpumask * @mask2: the second input cpumask * - * Returns >= nr_cpu_ids if no cpus set. + * Return: >= nr_cpu_ids if no cpus set. */ #define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2)) @@ -769,7 +785,7 @@ static inline void cpumask_copy(struct cpumask *dstp, * @len: the length of the buffer * @dstp: the cpumask to set. * - * Returns -errno, or 0 for success. + * Return: -errno, or 0 for success. */ static inline int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp) @@ -783,7 +799,7 @@ static inline int cpumask_parse_user(const char __user *buf, int len, * @len: the length of the buffer * @dstp: the cpumask to set. * - * Returns -errno, or 0 for success. + * Return: -errno, or 0 for success. */ static inline int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp) @@ -797,7 +813,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, * @buf: the buffer to extract from * @dstp: the cpumask to set. * - * Returns -errno, or 0 for success. + * Return: -errno, or 0 for success. */ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) { @@ -809,7 +825,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) * @buf: the buffer to extract from * @dstp: the cpumask to set. * - * Returns -errno, or 0 for success. + * Return: -errno, or 0 for success. */ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) { @@ -817,7 +833,9 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) } /** - * cpumask_size - size to allocate for a 'struct cpumask' in bytes + * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes + * + * Return: size to allocate for a &struct cpumask in bytes */ static inline unsigned int cpumask_size(void) { @@ -831,7 +849,7 @@ static inline unsigned int cpumask_size(void) * little more difficult, we typedef cpumask_var_t to an array or a * pointer: doing &mask on an array is a noop, so it still works. * - * ie. + * i.e. * cpumask_var_t tmpmask; * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) * return -ENOMEM; @@ -887,6 +905,8 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) * a nop returning a constant 1 (in <linux/cpumask.h>). * * See alloc_cpumask_var_node. + * + * Return: %true if allocation succeeded, %false if not */ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) @@ -1025,7 +1045,7 @@ set_cpu_dying(unsigned int cpu, bool dying) } /** - * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * + * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask * * @bitmap: the bitmap * * There are a few places where cpumask_var_t isn't appropriate and @@ -1068,6 +1088,8 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) * interface gives only a momentary snapshot and is not protected against * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held * region. 
+ * + * Return: momentary snapshot of the number of online CPUs */ static __always_inline unsigned int num_online_cpus(void) { @@ -1160,7 +1182,7 @@ static inline bool cpu_dying(unsigned int cpu) * @mask: the cpumask to copy * @buf: the buffer to copy into * - * Returns the length of the (null-terminated) @buf string, zero if + * Return: the length of the (null-terminated) @buf string, zero if * nothing is copied. */ static inline ssize_t @@ -1183,7 +1205,7 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) * cpumask; Typically used by bin_attribute to export cpumask bitmask * ABI. * - * Returns the length of how many bytes have been copied, excluding + * Return: the length of how many bytes have been copied, excluding * terminating '\0'. */ static inline ssize_t @@ -1204,6 +1226,9 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, * * Everything is same with the above cpumap_print_bitmask_to_buf() * except the print format. + * + * Return: the length of how many bytes have been copied, excluding + * terminating '\0'. */ static inline ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index d629094fac6e..875d12598bd2 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -77,6 +77,7 @@ extern void cpuset_lock(void); extern void cpuset_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); extern bool cpuset_cpus_allowed_fallback(struct task_struct *p); +extern bool cpuset_cpu_is_isolated(int cpu); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); @@ -207,6 +208,11 @@ static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p) return false; } +static inline bool cpuset_cpu_is_isolated(int cpu) +{ + return false; +} + static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) { return node_possible_map; diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 0c06561bf5ff..9eaeaafe0cad 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -5,6 +5,14 @@ #include <linux/linkage.h> #include <linux/elfcore.h> #include <linux/elf.h> +#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION +#include <asm/crash_core.h> +#endif + +/* Location of a reserved region to hold the crash kernel. 
+ */ +extern struct resource crashk_res; +extern struct resource crashk_low_res; #define CRASH_CORE_NOTE_NAME "CORE" #define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) @@ -80,11 +88,36 @@ Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void final_note(Elf_Word *buf); int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, - unsigned long long *crash_size, unsigned long long *crash_base); -int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, - unsigned long long *crash_size, unsigned long long *crash_base); -int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, - unsigned long long *crash_size, unsigned long long *crash_base); + unsigned long long *crash_size, unsigned long long *crash_base, + unsigned long long *low_size, bool *high); + +#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION +#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE +#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20) +#endif +#ifndef CRASH_ALIGN +#define CRASH_ALIGN SZ_2M +#endif +#ifndef CRASH_ADDR_LOW_MAX +#define CRASH_ADDR_LOW_MAX SZ_4G +#endif +#ifndef CRASH_ADDR_HIGH_MAX +#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM() +#endif + +void __init reserve_crashkernel_generic(char *cmdline, + unsigned long long crash_size, + unsigned long long crash_base, + unsigned long long crash_low_size, + bool high); +#else +static inline void __init reserve_crashkernel_generic(char *cmdline, + unsigned long long crash_size, + unsigned long long crash_base, + unsigned long long crash_low_size, + bool high) +{} +#endif /* Alignment required for elf header segment */ #define ELF_CORE_HEADER_ALIGN 4096 @@ -92,7 +125,7 @@ int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; - struct range ranges[]; + struct range ranges[] __counted_by(max_nr_ranges); }; extern int crash_exclude_mem_range(struct crash_mem *mem, diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 0f3a656293b0..acc55626afdc 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -50,6 +50,7 @@ void vmcore_cleanup(void); #define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) #endif +#ifndef is_kdump_kernel /* * is_kdump_kernel() checks whether this kernel is booting after a panic of * previous kernel or not. This is determined by checking if previous kernel @@ -64,6 +65,7 @@ static inline bool is_kdump_kernel(void) { return elfcorehdr_addr != ELFCORE_ADDR_MAX; } +#endif /* is_vmcore_usable() checks if the kernel is booting after a panic and * the vmcore region is usable. @@ -75,7 +77,8 @@ static inline bool is_kdump_kernel(void) static inline int is_vmcore_usable(void) { - return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0; + return elfcorehdr_addr != ELFCORE_ADDR_ERR && + elfcorehdr_addr != ELFCORE_ADDR_MAX ? 
1 : 0; } /* vmcore_unusable() marks the vmcore as unusable, @@ -84,8 +87,7 @@ static inline int is_vmcore_usable(void) static inline void vmcore_unusable(void) { - if (is_kdump_kernel()) - elfcorehdr_addr = ELFCORE_ADDR_ERR; + elfcorehdr_addr = ELFCORE_ADDR_ERR; } /** diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h index 72c92c396bb8..cd4f420231ba 100644 --- a/include/linux/crc-ccitt.h +++ b/include/linux/crc-ccitt.h @@ -5,19 +5,12 @@ #include <linux/types.h> extern u16 const crc_ccitt_table[256]; -extern u16 const crc_ccitt_false_table[256]; extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); -extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len); static inline u16 crc_ccitt_byte(u16 crc, const u8 c) { return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; } -static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c) -{ - return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c]; -} - #endif /* _LINUX_CRC_CCITT_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index f923528d5cc4..2976f534a7a3 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/key.h> #include <linux/atomic.h> +#include <linux/refcount.h> #include <linux/uidgid.h> #include <linux/sched.h> #include <linux/sched/user.h> @@ -23,7 +24,7 @@ struct inode; * COW Supplementary groups list */ struct group_info { - atomic_t usage; + refcount_t usage; int ngroups; kgid_t gid[]; } __randomize_layout; @@ -39,7 +40,7 @@ struct group_info { */ static inline struct group_info *get_group_info(struct group_info *gi) { - atomic_inc(&gi->usage); + refcount_inc(&gi->usage); return gi; } @@ -49,7 +50,7 @@ static inline struct group_info *get_group_info(struct group_info *gi) */ #define put_group_info(group_info) \ do { \ - if (atomic_dec_and_test(&(group_info)->usage)) \ + if (refcount_dec_and_test(&(group_info)->usage)) \ groups_free(group_info); \ } while (0) @@ -108,14 +109,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp) * same context as task->real_cred. 
*/ struct cred { - atomic_t usage; -#ifdef CONFIG_DEBUG_CREDENTIALS - atomic_t subscribers; /* number of processes subscribed */ - void *put_addr; - unsigned magic; -#define CRED_MAGIC 0x43736564 -#define CRED_MAGIC_DEAD 0x44656144 -#endif + atomic_long_t usage; kuid_t uid; /* real UID of the task */ kgid_t gid; /* real GID of the task */ kuid_t suid; /* saved UID of the task */ @@ -171,46 +165,6 @@ extern int cred_fscmp(const struct cred *, const struct cred *); extern void __init cred_init(void); extern int set_cred_ucounts(struct cred *); -/* - * check for validity of credentials - */ -#ifdef CONFIG_DEBUG_CREDENTIALS -extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned); -extern void __validate_process_creds(struct task_struct *, - const char *, unsigned); - -extern bool creds_are_invalid(const struct cred *cred); - -static inline void __validate_creds(const struct cred *cred, - const char *file, unsigned line) -{ - if (unlikely(creds_are_invalid(cred))) - __invalid_creds(cred, file, line); -} - -#define validate_creds(cred) \ -do { \ - __validate_creds((cred), __FILE__, __LINE__); \ -} while(0) - -#define validate_process_creds() \ -do { \ - __validate_process_creds(current, __FILE__, __LINE__); \ -} while(0) - -extern void validate_creds_for_do_exit(struct task_struct *); -#else -static inline void validate_creds(const struct cred *cred) -{ -} -static inline void validate_creds_for_do_exit(struct task_struct *tsk) -{ -} -static inline void validate_process_creds(void) -{ -} -#endif - static inline bool cap_ambient_invariant_ok(const struct cred *cred) { return cap_issubset(cred->cap_ambient, @@ -219,6 +173,20 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) } /** + * get_new_cred_many - Get references on a new set of credentials + * @cred: The new credentials to reference + * @nr: Number of references to acquire + * + * Get references on the specified set of new credentials. The caller must + * release all acquired references. + */ +static inline struct cred *get_new_cred_many(struct cred *cred, int nr) +{ + atomic_long_add(nr, &cred->usage); + return cred; +} + +/** * get_new_cred - Get a reference on a new set of credentials * @cred: The new credentials to reference * @@ -227,16 +195,16 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) */ static inline struct cred *get_new_cred(struct cred *cred) { - atomic_inc(&cred->usage); - return cred; + return get_new_cred_many(cred, 1); } /** - * get_cred - Get a reference on a set of credentials + * get_cred_many - Get references on a set of credentials * @cred: The credentials to reference + * @nr: Number of references to acquire * - * Get a reference on the specified set of credentials. The caller must - * release the reference. If %NULL is passed, it is returned with no action. + * Get references on the specified set of credentials. The caller must release + * all acquired references. If %NULL is passed, it is returned with no action. * * This is used to deal with a committed set of credentials. Although the * pointer is const, this will temporarily discard the const and increment the @@ -244,14 +212,27 @@ static inline struct cred *get_new_cred(struct cred *cred) * accidental alteration of a set of credentials that should be considered * immutable.
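To illustrate the batched credential helpers added above, a sketch (both helpers are invented; the pattern is loosely modeled on how callers amortize refcounting): publish one cred into n slots with a single atomic add, and tear all n references down with a single atomic subtraction.

static void install_cred_many(const struct cred *cred,
			      const struct cred **slots, int n)
{
	int i;

	get_cred_many(cred, n);		/* one atomic add covers all slots */
	for (i = 0; i < n; i++)
		slots[i] = cred;
}

static void uninstall_cred_many(const struct cred **slots, int n)
{
	if (n > 0)
		put_cred_many(slots[0], n);	/* one atomic sub for all slots */
}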
*/ -static inline const struct cred *get_cred(const struct cred *cred) +static inline const struct cred *get_cred_many(const struct cred *cred, int nr) { struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return cred; - validate_creds(cred); nonconst_cred->non_rcu = 0; - return get_new_cred(nonconst_cred); + return get_new_cred_many(nonconst_cred, nr); +} + +/* + * get_cred - Get a reference on a set of credentials + * @cred: The credentials to reference + * + * Get a reference on the specified set of credentials. The caller must + * release the reference. If %NULL is passed, it is returned with no action. + * + * This is used to deal with a committed set of credentials. + */ +static inline const struct cred *get_cred(const struct cred *cred) +{ + return get_cred_many(cred, 1); } static inline const struct cred *get_cred_rcu(const struct cred *cred) @@ -259,9 +240,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred) struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return NULL; - if (!atomic_inc_not_zero(&nonconst_cred->usage)) + if (!atomic_long_inc_not_zero(&nonconst_cred->usage)) return NULL; - validate_creds(cred); nonconst_cred->non_rcu = 0; return cred; } @@ -269,6 +249,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred) /** - * put_cred - Release a reference to a set of credentials + * put_cred_many - Release references to a set of credentials * @cred: The credentials to release + * @nr: Number of references to release * * Release a reference to a set of credentials, deleting them when the last ref * is released. If %NULL is passed, nothing is done. @@ -277,17 +258,28 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred) * on task_struct are attached by const pointers to prevent accidental * alteration of otherwise immutable credential sets. */ -static inline void put_cred(const struct cred *_cred) +static inline void put_cred_many(const struct cred *_cred, int nr) { struct cred *cred = (struct cred *) _cred; if (cred) { - validate_creds(cred); - if (atomic_dec_and_test(&(cred)->usage)) + if (atomic_long_sub_and_test(nr, &cred->usage)) __put_cred(cred); } } +/* + * put_cred - Release a reference to a set of credentials + * @cred: The credentials to release + * + * Release a reference to a set of credentials, deleting them when the last ref + * is released. If %NULL is passed, nothing is done. + */ +static inline void put_cred(const struct cred *cred) +{ + put_cred_many(cred, 1); +} + /** * current_cred - Access the current task's subjective credentials * diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 31f6fee0c36c..b164da5e129e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -24,6 +24,7 @@ #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 #define CRYPTO_ALG_TYPE_AEAD 0x00000003 +#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_SIG 0x00000007 @@ -35,8 +36,6 @@ #define CRYPTO_ALG_TYPE_SHASH 0x0000000e #define CRYPTO_ALG_TYPE_AHASH 0x0000000f -#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e -#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e #define CRYPTO_ALG_LARVAL 0x00000010 @@ -111,7 +110,6 @@ * crypto_aead_walksize() (with the remainder going at the end), no chunk * can cross a page boundary or a scatterlist element boundary. * ahash: - * - The result buffer must be aligned to the algorithm's alignmask.
* - crypto_ahash_finup() must not be used unless the algorithm implements * ->finup() natively. */ @@ -279,18 +277,20 @@ struct compress_alg { * @cra_ctxsize: Size of the operational context of the transformation. This * value informs the kernel crypto API about the memory size * needed to be allocated for the transformation context. - * @cra_alignmask: Alignment mask for the input and output data buffer. The data - * buffer containing the input data for the algorithm must be - * aligned to this alignment mask. The data buffer for the - * output data must be aligned to this alignment mask. Note that - * the Crypto API will do the re-alignment in software, but - * only under special conditions and there is a performance hit. - * The re-alignment happens at these occasions for different - * @cra_u types: cipher -- For both input data and output data - * buffer; ahash -- For output hash destination buf; shash -- - * For output hash destination buf. - * This is needed on hardware which is flawed by design and - * cannot pick data from arbitrary addresses. + * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is + * 1 less than the alignment, in bytes, that the algorithm + * implementation requires for input and output buffers. When + * the crypto API is invoked with buffers that are not aligned + * to this alignment, the crypto API automatically utilizes + * appropriately aligned temporary buffers to comply with what + * the algorithm needs. (For scatterlists this happens only if + * the algorithm uses the skcipher_walk helper functions.) This + * misalignment handling carries a performance penalty, so it is + * preferred that algorithms do not set a nonzero alignmask. + * Also, crypto API users may wish to allocate buffers aligned + * to the alignmask of the algorithm being used, in order to + * avoid the API having to realign them. Note: the alignmask is + * not supported for hash algorithms and is always 0 for them. * @cra_priority: Priority of this transformation implementation. In case * multiple transformations with same @cra_name are available to * the Crypto API, the kernel will use the one with highest diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h new file mode 100644 index 000000000000..91125eca4c8a --- /dev/null +++ b/include/linux/cxl-event.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2023 Intel Corporation. 
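For orientation, a sketch of how a consumer might hook into the GHES path via the callback API declared near the bottom of the new header below; the handler body and module names are illustrative only:

```c
#include <linux/cxl-event.h>
#include <linux/module.h>

static void my_cxl_cper_handler(enum cxl_event_type type,
				struct cxl_cper_event_rec *rec)
{
	if (type == CXL_CPER_EVENT_DRAM)
		pr_info("CXL DRAM CPER event, bank %u\n",
			rec->event.dram.bank);
}

static int __init my_cxl_init(void)
{
	/* falls back to a 0-returning stub without CONFIG_ACPI_APEI_GHES */
	return cxl_cper_register_callback(my_cxl_cper_handler);
}

static void __exit my_cxl_exit(void)
{
	cxl_cper_unregister_callback(my_cxl_cper_handler);
}

module_init(my_cxl_init);
module_exit(my_cxl_exit);
MODULE_LICENSE("GPL");
```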
*/ +#ifndef _LINUX_CXL_EVENT_H +#define _LINUX_CXL_EVENT_H + +/* + * Common Event Record Format + * CXL rev 3.0 section 8.2.9.2.1; Table 8-42 + */ +struct cxl_event_record_hdr { + u8 length; + u8 flags[3]; + __le16 handle; + __le16 related_handle; + __le64 timestamp; + u8 maint_op_class; + u8 reserved[15]; +} __packed; + +#define CXL_EVENT_RECORD_DATA_LENGTH 0x50 +struct cxl_event_generic { + struct cxl_event_record_hdr hdr; + u8 data[CXL_EVENT_RECORD_DATA_LENGTH]; +} __packed; + +/* + * General Media Event Record + * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 + */ +#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10 +struct cxl_event_gen_media { + struct cxl_event_record_hdr hdr; + __le64 phys_addr; + u8 descriptor; + u8 type; + u8 transaction_type; + u8 validity_flags[2]; + u8 channel; + u8 rank; + u8 device[3]; + u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE]; + u8 reserved[46]; +} __packed; + +/* + * DRAM Event Record - DER + * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 + */ +#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20 +struct cxl_event_dram { + struct cxl_event_record_hdr hdr; + __le64 phys_addr; + u8 descriptor; + u8 type; + u8 transaction_type; + u8 validity_flags[2]; + u8 channel; + u8 rank; + u8 nibble_mask[3]; + u8 bank_group; + u8 bank; + u8 row[3]; + u8 column[2]; + u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE]; + u8 reserved[0x17]; +} __packed; + +/* + * Get Health Info Record + * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100 + */ +struct cxl_get_health_info { + u8 health_status; + u8 media_status; + u8 add_status; + u8 life_used; + u8 device_temp[2]; + u8 dirty_shutdown_cnt[4]; + u8 cor_vol_err_cnt[4]; + u8 cor_per_err_cnt[4]; +} __packed; + +/* + * Memory Module Event Record + * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45 + */ +struct cxl_event_mem_module { + struct cxl_event_record_hdr hdr; + u8 event_type; + struct cxl_get_health_info info; + u8 reserved[0x3d]; +} __packed; + +union cxl_event { + struct cxl_event_generic generic; + struct cxl_event_gen_media gen_media; + struct cxl_event_dram dram; + struct cxl_event_mem_module mem_module; +} __packed; + +/* + * Common Event Record Format; in event logs + * CXL rev 3.0 section 8.2.9.2.1; Table 8-42 + */ +struct cxl_event_record_raw { + uuid_t id; + union cxl_event event; +} __packed; + +enum cxl_event_type { + CXL_CPER_EVENT_GENERIC, + CXL_CPER_EVENT_GEN_MEDIA, + CXL_CPER_EVENT_DRAM, + CXL_CPER_EVENT_MEM_MODULE, +}; + +#define CPER_CXL_DEVICE_ID_VALID BIT(0) +#define CPER_CXL_DEVICE_SN_VALID BIT(1) +#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2) +struct cxl_cper_event_rec { + struct { + u32 length; + u64 validation_bits; + struct cper_cxl_event_devid { + u16 vendor_id; + u16 device_id; + u8 func_num; + u8 device_num; + u8 bus_num; + u16 segment_num; + u16 slot_num; /* bits 2:0 reserved */ + u8 reserved; + } __packed device_id; + struct cper_cxl_event_sn { + u32 lower_dw; + u32 upper_dw; + } __packed dev_serial_num; + } __packed hdr; + + union cxl_event event; +} __packed; + +typedef void (*cxl_cper_callback)(enum cxl_event_type type, + struct cxl_cper_event_rec *rec); + +#ifdef CONFIG_ACPI_APEI_GHES +int cxl_cper_register_callback(cxl_cper_callback callback); +int cxl_cper_unregister_callback(cxl_cper_callback callback); +#else +static inline int cxl_cper_register_callback(cxl_cper_callback callback) +{ + return 0; +} + +static inline int cxl_cper_unregister_callback(cxl_cper_callback callback) +{ + return 0; +} +#endif + +#endif /* _LINUX_CXL_EVENT_H */ diff --git a/include/linux/damon.h b/include/linux/damon.h index
ae2664d1d5f1..5881e4ac30be 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -2,7 +2,7 @@ /* * DAMON api * - * Author: SeongJae Park <sjpark@amazon.de> + * Author: SeongJae Park <sj@kernel.org> */ #ifndef _DAMON_H_ @@ -40,9 +40,24 @@ struct damon_addr_range { * @ar: The address range of the region. * @sampling_addr: Address of the sample for the next access check. * @nr_accesses: Access frequency of this region. + * @nr_accesses_bp: @nr_accesses in basis point (0.01%) that is updated for + * each sampling interval. * @list: List head for siblings. * @age: Age of this region. * + * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and is + * increased for every &damon_attrs->sample_interval if an access to the region + * during the last sampling interval is found. The update of this field should + * not be done with direct access but with the helper function, + * damon_update_region_access_rate(). + * + * @nr_accesses_bp is another representation of @nr_accesses in basis point + * (1 in 10,000) that is updated for every &damon_attrs->sample_interval in a + * manner similar to a moving sum. By the algorithm, this value becomes + * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval. This can + * be used when the aggregation interval is too long to wait + * for before getting the access monitoring results. + * * @age is initially zero, increased for each aggregation interval, and reset * to zero again if the access frequency is significantly changed. If two * regions are merged into a new region, both @nr_accesses and @age of the new @@ -52,6 +67,7 @@ struct damon_region { struct damon_addr_range ar; unsigned long sampling_addr; unsigned int nr_accesses; + unsigned int nr_accesses_bp; struct list_head list; unsigned int age; @@ -120,6 +136,9 @@ enum damos_action { * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization. * @weight_age: Weight of the region's age for prioritization. * + * @get_score: Feedback function for self-tuning quota. + * @get_score_arg: Parameter for @get_score. + * * To avoid consuming too much CPU time or IO resources for applying the * &struct damos->action to large memory, DAMON allows users to set time and/or * size quotas. The quotas can be set by writing non-zero values to &ms and * @@ -137,6 +156,17 @@ enum damos_action { * You could customize the prioritization logic by setting &weight_sz, * &weight_nr_accesses, and &weight_age, because monitoring operations are * encouraged to respect those. + * + * If the @get_score function pointer is set, DAMON calls it back with + * @get_score_arg and gets its return value for every @reset_interval. + * Then, DAMON adjusts the effective quota using the return value as a feedback + * score to the current quota, using its internal feedback loop algorithm. + * + * The feedback loop algorithm assumes the quota input and the feedback score + * output are in a positive proportional relationship, and the goal of the + * tuning is getting a feedback score value of 10,000. If @ms and/or @sz are + * set together, those work as a hard limit quota. If neither @ms nor @sz are + * set, the mechanism starts from the quota of one byte.
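A sketch of wiring a self-tuning quota to a workload metric; only the callback contract (a score of 10,000 means "on target") comes from the text above, and the metric function itself is hypothetical:

```c
#include <linux/damon.h>

/* Hypothetical metric: returns the quantity the quota should regulate,
 * scaled so that "on target" reads as 10000 (see the doc above).
 */
extern unsigned long my_metric_in_bp(void *arg);

static struct damos_quota my_quota = {
	/* .ms and .sz left zero: no hard limit, tuning starts at one byte */
	.get_score = my_metric_in_bp,
	.get_score_arg = NULL,
};
```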
*/ struct damos_quota { unsigned long ms; @@ -147,6 +177,9 @@ struct damos_quota { unsigned int weight_nr_accesses; unsigned int weight_age; + unsigned long (*get_score)(void *arg); + void *get_score_arg; + /* private: */ /* For throughput estimation */ unsigned long total_charged_sz; @@ -163,6 +196,9 @@ struct damos_quota { /* For prioritization */ unsigned long histogram[DAMOS_MAX_SCORE + 1]; unsigned int min_score; + + /* For feedback loop */ + unsigned long esz_bp; }; /** @@ -298,24 +334,24 @@ struct damos_access_pattern { * struct damos - Represents a Data Access Monitoring-based Operation Scheme. * @pattern: Access pattern of target regions. * @action: &damos_action to be applied to the target regions. + * @apply_interval_us: The time between applying the @action. * @quota: Control the aggressiveness of this scheme. * @wmarks: Watermarks for automated (in)activation of this scheme. * @filters: Additional set of &struct damos_filter for &action. * @stat: Statistics of this scheme. * @list: List head for siblings. * - * For each aggregation interval, DAMON finds regions which fit in the + * For each @apply_interval_us, DAMON finds regions which fit in the * &pattern and applies &action to those. To avoid consuming too much * CPU time or IO resources for the &action, &quota is used. * + * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead. + * * To do the work only when needed, schemes can be activated for specific * system situations using &wmarks. If all schemes that registered to the * monitoring context are inactive, DAMON stops monitoring either, and just * repeatedly checks the watermarks. * - * If all schemes that registered to a &struct damon_ctx are inactive, DAMON - * stops monitoring and just repeatedly checks the watermarks. - * * Before applying the &action to a memory region, &struct damon_operations * implementation could check pages of the region and skip &action to respect * &filters @@ -327,6 +363,14 @@ struct damos_access_pattern { struct damos { struct damos_access_pattern pattern; enum damos_action action; + unsigned long apply_interval_us; +/* private: internal use only */ + /* + * number of sample intervals that should be passed before applying + * @action + */ + unsigned long next_apply_sis; +/* public: */ struct damos_quota quota; struct damos_watermarks wmarks; struct list_head filters; @@ -472,13 +516,14 @@ struct damon_callback { * regions. * * For each @sample_interval, DAMON checks whether each region is accessed or - * not. It aggregates and keeps the access information (number of accesses to - * each region) for @aggr_interval time. DAMON also checks whether the target - * memory regions need update (e.g., by ``mmap()`` calls from the application, - * in case of virtual memory monitoring) and applies the changes for each - * @ops_update_interval. All time intervals are in micro-seconds. - * Please refer to &struct damon_operations and &struct damon_callback for more - * detail. + * not during the last @sample_interval. If such access is found, DAMON + * aggregates the information by increasing &damon_region->nr_accesses for + * @aggr_interval time. For each @aggr_interval, the count is reset. DAMON + * also checks whether the target memory regions need update (e.g., by + * ``mmap()`` calls from the application, in case of virtual memory monitoring) + * and applies the changes for each @ops_update_interval. All time intervals + * are in micro-seconds.
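To make the interval relationships concrete, a sketch with illustrative values (not defaults; field names beyond sample_interval follow the existing DAMON attrs layout and the struct itself follows just below):

```c
#include <linux/damon.h>

static const struct damon_attrs my_attrs = {
	.sample_interval = 5000,	/* check each region every 5 ms */
	.aggr_interval = 100000,	/* reset nr_accesses every 100 ms */
	.ops_update_interval = 1000000,	/* re-sync target regions every 1 s */
};

/* nr_accesses can reach at most 100000 / 5000 = 20 per aggregation
 * window, which is exactly what damon_max_nr_accesses() below computes.
 */
```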
Please refer to &struct damon_operations and &struct + * damon_callback for more detail. */ struct damon_attrs { unsigned long sample_interval; @@ -522,8 +567,20 @@ struct damon_ctx { struct damon_attrs attrs; /* private: internal use only */ - struct timespec64 last_aggregation; - struct timespec64 last_ops_update; + /* number of sample intervals that passed since this context started */ + unsigned long passed_sample_intervals; + /* + * number of sample intervals that should be passed before next + * aggregation + */ + unsigned long next_aggregation_sis; + /* + * number of sample intervals that should be passed before next ops + * update + */ + unsigned long next_ops_update_sis; + /* for waiting until the execution of the kdamond_fn is started */ + struct completion kdamond_started; /* public: */ struct task_struct *kdamond; @@ -608,6 +665,8 @@ void damon_add_region(struct damon_region *r, struct damon_target *t); void damon_destroy_region(struct damon_region *r, struct damon_target *t); int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, unsigned int nr_ranges); +void damon_update_region_access_rate(struct damon_region *r, bool accessed, + struct damon_attrs *attrs); struct damos_filter *damos_new_filter(enum damos_filter_type type, bool matching); @@ -615,7 +674,9 @@ void damos_add_filter(struct damos *s, struct damos_filter *f); void damos_destroy_filter(struct damos_filter *f); struct damos *damon_new_scheme(struct damos_access_pattern *pattern, - enum damos_action action, struct damos_quota *quota, + enum damos_action action, + unsigned long apply_interval_us, + struct damos_quota *quota, struct damos_watermarks *wmarks); void damon_add_scheme(struct damon_ctx *ctx, struct damos *s); void damon_destroy_scheme(struct damos *s); @@ -642,6 +703,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx) return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR; } +static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs) +{ + /* {aggr,sample}_interval are unsigned long, hence could overflow */ + return min(attrs->aggr_interval / attrs->sample_interval, + (unsigned long)UINT_MAX); +} + int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive); int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); diff --git a/include/linux/dax.h b/include/linux/dax.h index 22cd9902345d..b463502b16e1 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -159,8 +159,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, struct page *dax_layout_busy_page(struct address_space *mapping); struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); -dax_entry_t dax_lock_page(struct page *page); -void dax_unlock_page(struct page *page, dax_entry_t cookie); +dax_entry_t dax_lock_folio(struct folio *folio); +void dax_unlock_folio(struct folio *folio, dax_entry_t cookie); dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, unsigned long index, struct page **page); void dax_unlock_mapping_entry(struct address_space *mapping, @@ -182,14 +182,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping, return -EOPNOTSUPP; } -static inline dax_entry_t dax_lock_page(struct page *page) +static inline dax_entry_t dax_lock_folio(struct folio *folio) { - if (IS_DAX(page->mapping->host)) + if (IS_DAX(folio->mapping->host)) return ~0UL; return 0; } -static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) +static inline void 
dax_unlock_folio(struct folio *folio, dax_entry_t cookie) { } diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 6b351e009f59..1666c387861f 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -68,12 +68,12 @@ extern const struct qstr dotdot_name; * large memory footprint increase). */ #ifdef CONFIG_64BIT -# define DNAME_INLINE_LEN 32 /* 192 bytes */ +# define DNAME_INLINE_LEN 40 /* 192 bytes */ #else # ifdef CONFIG_SMP -# define DNAME_INLINE_LEN 36 /* 128 bytes */ -# else # define DNAME_INLINE_LEN 40 /* 128 bytes */ +# else +# define DNAME_INLINE_LEN 44 /* 128 bytes */ # endif #endif @@ -101,8 +101,8 @@ struct dentry { struct list_head d_lru; /* LRU list */ wait_queue_head_t *d_wait; /* in-lookup ones only */ }; - struct list_head d_child; /* child of parent list */ - struct list_head d_subdirs; /* our children */ + struct hlist_node d_sib; /* child of parent list */ + struct hlist_head d_children; /* our children */ /* * d_alias and d_rcu can share memory */ @@ -111,7 +111,7 @@ struct dentry { struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ struct rcu_head d_rcu; } d_u; -} __randomize_layout; +}; /* * dentry->d_lock spinlock nesting subclasses: @@ -151,13 +151,13 @@ struct dentry_operations { */ /* d_flags entries */ -#define DCACHE_OP_HASH 0x00000001 -#define DCACHE_OP_COMPARE 0x00000002 -#define DCACHE_OP_REVALIDATE 0x00000004 -#define DCACHE_OP_DELETE 0x00000008 -#define DCACHE_OP_PRUNE 0x00000010 +#define DCACHE_OP_HASH BIT(0) +#define DCACHE_OP_COMPARE BIT(1) +#define DCACHE_OP_REVALIDATE BIT(2) +#define DCACHE_OP_DELETE BIT(3) +#define DCACHE_OP_PRUNE BIT(4) -#define DCACHE_DISCONNECTED 0x00000020 +#define DCACHE_DISCONNECTED BIT(5) /* This dentry is possibly not currently connected to the dcache tree, in * which case its parent will either be itself, or will have this flag as * well. nfsd will not use a dentry with this bit set, but will first @@ -168,50 +168,46 @@ struct dentry_operations { * dentry into place and return that dentry rather than the passed one, * typically using d_splice_alias. */ -#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */ +#define DCACHE_REFERENCED BIT(6) /* Recently used, don't discard. 
*/ -#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */ +#define DCACHE_DONTCACHE BIT(7) /* Purge from memory on final dput() */ -#define DCACHE_CANT_MOUNT 0x00000100 -#define DCACHE_GENOCIDE 0x00000200 -#define DCACHE_SHRINK_LIST 0x00000400 +#define DCACHE_CANT_MOUNT BIT(8) +#define DCACHE_SHRINK_LIST BIT(10) -#define DCACHE_OP_WEAK_REVALIDATE 0x00000800 +#define DCACHE_OP_WEAK_REVALIDATE BIT(11) -#define DCACHE_NFSFS_RENAMED 0x00001000 +#define DCACHE_NFSFS_RENAMED BIT(12) /* this dentry has been "silly renamed" and has to be deleted on the last * dput() */ -#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */ -#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000 +#define DCACHE_FSNOTIFY_PARENT_WATCHED BIT(14) /* Parent inode is watched by some fsnotify listener */ -#define DCACHE_DENTRY_KILLED 0x00008000 +#define DCACHE_DENTRY_KILLED BIT(15) -#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */ -#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */ -#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */ +#define DCACHE_MOUNTED BIT(16) /* is a mountpoint */ +#define DCACHE_NEED_AUTOMOUNT BIT(17) /* handle automount on this dir */ +#define DCACHE_MANAGE_TRANSIT BIT(18) /* manage transit from this dirent */ #define DCACHE_MANAGED_DENTRY \ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) -#define DCACHE_LRU_LIST 0x00080000 +#define DCACHE_LRU_LIST BIT(19) -#define DCACHE_ENTRY_TYPE 0x00700000 -#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */ -#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */ -#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */ -#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */ -#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */ -#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */ -#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */ +#define DCACHE_ENTRY_TYPE (7 << 20) /* bits 20..22 are for storing type: */ +#define DCACHE_MISS_TYPE (0 << 20) /* Negative dentry */ +#define DCACHE_WHITEOUT_TYPE (1 << 20) /* Whiteout dentry (stop pathwalk) */ +#define DCACHE_DIRECTORY_TYPE (2 << 20) /* Normal directory */ +#define DCACHE_AUTODIR_TYPE (3 << 20) /* Lookupless directory (presumed automount) */ +#define DCACHE_REGULAR_TYPE (4 << 20) /* Regular file type */ +#define DCACHE_SPECIAL_TYPE (5 << 20) /* Other file type */ +#define DCACHE_SYMLINK_TYPE (6 << 20) /* Symlink */ -#define DCACHE_MAY_FREE 0x00800000 -#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ -#define DCACHE_NOKEY_NAME 0x02000000 /* Encrypted name encoded without key */ -#define DCACHE_OP_REAL 0x04000000 +#define DCACHE_NOKEY_NAME BIT(25) /* Encrypted name encoded without key */ +#define DCACHE_OP_REAL BIT(26) -#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ -#define DCACHE_DENTRY_CURSOR 0x20000000 -#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */ +#define DCACHE_PAR_LOOKUP BIT(28) /* being looked up (with parent locked shared) */ +#define DCACHE_DENTRY_CURSOR BIT(29) +#define DCACHE_NORCU BIT(30) /* No RCU delay for freeing */ extern seqlock_t rename_lock; @@ -220,8 +216,6 @@ extern seqlock_t rename_lock; */ extern void d_instantiate(struct dentry *, struct inode *); extern void d_instantiate_new(struct dentry *, struct inode *); -extern struct dentry * 
d_instantiate_unique(struct dentry *, struct inode *); -extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); extern void d_delete(struct dentry *); @@ -242,15 +236,12 @@ extern struct dentry * d_obtain_alias(struct inode *); extern struct dentry * d_obtain_root(struct inode *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); -extern void shrink_dcache_for_umount(struct super_block *); extern void d_invalidate(struct dentry *); /* only used at mount-time */ extern struct dentry * d_make_root(struct inode *); -/* <clickety>-<click> the ramfs-type tree */ -extern void d_genocide(struct dentry *); - +extern void d_mark_tmpfile(struct file *, struct inode *); extern void d_tmpfile(struct file *, struct inode *); extern struct dentry *d_find_alias(struct inode *); @@ -273,12 +264,8 @@ extern void d_move(struct dentry *, struct dentry *); extern void d_exchange(struct dentry *, struct dentry *); extern struct dentry *d_ancestor(struct dentry *, struct dentry *); -/* appendix may either be NULL or be used for transname suffixes */ extern struct dentry *d_lookup(const struct dentry *, const struct qstr *); extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); -extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *); -extern struct dentry *__d_lookup_rcu(const struct dentry *parent, - const struct qstr *name, unsigned *seq); static inline unsigned d_count(const struct dentry *dentry) { @@ -300,20 +287,40 @@ extern char *dentry_path(const struct dentry *, char *, int); /* Allocation counts.. */ /** - * dget, dget_dlock - get a reference to a dentry - * @dentry: dentry to get a reference to + * dget_dlock - get a reference to a dentry + * @dentry: dentry to get a reference to * - * Given a dentry or %NULL pointer increment the reference count - * if appropriate and return the dentry. A dentry will not be - * destroyed when it has references. + * Given a live dentry, increment the reference count and return the dentry. + * Caller must hold @dentry->d_lock. Making sure that dentry is alive is + * the caller's responsibility. There are many conditions sufficient to guarantee + * that; e.g. anything with non-negative refcount is alive, so's anything + * hashed, anything positive, anyone's parent, etc. */ static inline struct dentry *dget_dlock(struct dentry *dentry) { - if (dentry) - dentry->d_lockref.count++; + dentry->d_lockref.count++; return dentry; } + +/** + * dget - get a reference to a dentry + * @dentry: dentry to get a reference to + * + * Given a dentry or %NULL pointer increment the reference count + * if appropriate and return the dentry. A dentry will not be + * destroyed when it has references. Conversely, a dentry with + * no references can disappear for any number of reasons, starting + * with memory pressure. In other words, that primitive is + * used to clone an existing reference; using it on something with + * zero refcount is a bug. + * + * NOTE: it will spin if @dentry->d_lock is held. From the deadlock + * avoidance point of view it is equivalent to spin_lock()/increment + * refcount/spin_unlock(), so calling it under @dentry->d_lock is + * always a bug; so's calling it under ->d_lock on any of its descendants.
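A sketch of the pattern the new dget_dlock() comment describes, i.e. cloning a reference from a dentry known to be alive because its ->d_lock is held; the wrapper name is invented:

```c
#include <linux/dcache.h>
#include <linux/spinlock.h>

static struct dentry *my_clone_ref_locked(struct dentry *dentry)
{
	struct dentry *res;

	spin_lock(&dentry->d_lock);
	/* alive here: e.g. positive, hashed, or someone's parent */
	res = dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	return res;
}
```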
+ * + */ static inline struct dentry *dget(struct dentry *dentry) { if (dentry) @@ -324,12 +331,11 @@ static inline struct dentry *dget(struct dentry *dentry) extern struct dentry *dget_parent(struct dentry *dentry); /** - * d_unhashed - is dentry hashed - * @dentry: entry to check + * d_unhashed - is dentry hashed + * @dentry: entry to check * - * Returns true if the dentry passed is not currently hashed. + * Returns true if the dentry passed is not currently hashed. */ - static inline int d_unhashed(const struct dentry *dentry) { return hlist_bl_unhashed(&dentry->d_hash); @@ -489,14 +495,6 @@ static inline int simple_positive(const struct dentry *dentry) return d_really_is_positive(dentry) && !d_unhashed(dentry); } -extern void d_set_fallthru(struct dentry *dentry); - -static inline bool d_is_fallthru(const struct dentry *dentry) -{ - return dentry->d_flags & DCACHE_FALLTHRU; -} - - extern int sysctl_vfs_cache_pressure; static inline unsigned long vfs_pressure_ratio(unsigned long val) @@ -546,21 +544,6 @@ static inline struct inode *d_backing_inode(const struct dentry *upper) } /** - * d_backing_dentry - Get upper or lower dentry we should be using - * @upper: The upper layer - * - * This is the helper that should be used to get the dentry of the inode that - * will be used if this dentry were opened as a file. It may be the upper - * dentry or it may be a lower dentry pinned by the upper. - * - * Normal filesystems should not use this to access their own dentries. - */ -static inline struct dentry *d_backing_dentry(struct dentry *upper) -{ - return upper; -} - -/** * d_real - Return the real dentry * @dentry: the dentry to query * @inode: inode to select the dentry from multiple layers (can be NULL) @@ -599,4 +582,14 @@ struct name_snapshot { void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); void release_dentry_name_snapshot(struct name_snapshot *); +static inline struct dentry *d_first_child(const struct dentry *dentry) +{ + return hlist_entry_safe(dentry->d_children.first, struct dentry, d_sib); +} + +static inline struct dentry *d_next_sibling(const struct dentry *dentry) +{ + return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib); +} + #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index ea2d919fd9c7..c9c65b132c0f 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -171,6 +171,25 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); +/** + * struct debugfs_cancellation - cancellation data + * @list: internal, for keeping track + * @cancel: callback to call + * @cancel_data: extra data for the callback to call + */ +struct debugfs_cancellation { + struct list_head list; + void (*cancel)(struct dentry *, void *); + void *cancel_data; +}; + +void __acquires(cancellation) +debugfs_enter_cancellation(struct file *file, + struct debugfs_cancellation *cancellation); +void __releases(cancellation) +debugfs_leave_cancellation(struct file *file, + struct debugfs_cancellation *cancellation); + #else #include <linux/err.h> diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 69d0435c7ebb..772ab4d74d94 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -165,6 +165,7 @@ void dm_error(const char *message); struct dm_dev { struct block_device *bdev; + struct bdev_handle *bdev_handle; struct dax_device *dax_dev; 
blk_mode_t mode; char name[16]; diff --git a/include/linux/device.h b/include/linux/device.h index 56d93a1ffb7b..97c4b046c09d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -42,7 +42,6 @@ struct class; struct subsys_private; struct device_node; struct fwnode_handle; -struct iommu_ops; struct iommu_group; struct dev_pin_info; struct dev_iommu; @@ -63,7 +62,7 @@ struct msi_device_data; */ struct subsys_interface { const char *name; - struct bus_type *subsys; + const struct bus_type *subsys; struct list_head node; int (*add_dev)(struct device *dev, struct subsys_interface *sif); void (*remove_dev)(struct device *dev, struct subsys_interface *sif); @@ -72,9 +71,9 @@ struct subsys_interface { int subsys_interface_register(struct subsys_interface *sif); void subsys_interface_unregister(struct subsys_interface *sif); -int subsys_system_register(struct bus_type *subsys, +int subsys_system_register(const struct bus_type *subsys, const struct attribute_group **groups); -int subsys_virtual_register(struct bus_type *subsys, +int subsys_virtual_register(const struct bus_type *subsys, const struct attribute_group **groups); /* @@ -389,8 +388,8 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data); void devm_release_action(struct device *dev, void (*action)(void *), void *data); int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name); -#define devm_add_action(release, action, data) \ - __devm_add_action(release, action, data, #action) +#define devm_add_action(dev, action, data) \ + __devm_add_action(dev, action, data, #action) static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data, const char *name) @@ -403,8 +402,8 @@ static inline int __devm_add_action_or_reset(struct device *dev, void (*action)( return ret; } -#define devm_add_action_or_reset(release, action, data) \ - __devm_add_action_or_reset(release, action, data, #action) +#define devm_add_action_or_reset(dev, action, data) \ + __devm_add_action_or_reset(dev, action, data, #action) /** * devm_alloc_percpu - Resource-managed alloc_percpu @@ -662,7 +661,6 @@ struct device_physical_location { * @id: device instance * @devres_lock: Spinlock to protect the resource of the device. * @devres_head: The resources list of the device. - * @knode_class: The node used to add the device to the class list. * @class: The class of the device. * @groups: Optional attribute groups. * @release: Callback to free the device after all references have @@ -1007,6 +1005,8 @@ static inline void device_unlock(struct device *dev) mutex_unlock(&dev->mutex); } +DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T)) + static inline void device_lock_assert(struct device *dev) { lockdep_assert_held(&dev->mutex); @@ -1071,7 +1071,6 @@ int device_rename(struct device *dev, const char *new_name); int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); -int device_is_dependent(struct device *dev, void *target); static inline bool device_supports_offline(struct device *dev) { diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index ae10c4322754..5ef4ec1c36c3 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -62,9 +62,6 @@ struct fwnode_handle; * this bus. * @pm: Power management operations of this bus, callback the specific * device driver's pm-ops. 
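The DEFINE_GUARD(device, ...) added in the device.h hunk above enables scope-based locking via <linux/cleanup.h>. A sketch of a caller; the function itself is invented:

```c
#include <linux/device.h>
#include <linux/cleanup.h>

static int my_do_work_locked(struct device *dev)
{
	guard(device)(dev);	/* device_lock() now, device_unlock() at scope exit */

	if (!device_is_registered(dev))
		return -ENODEV;	/* unlock still happens automatically */
	/* ... operate on dev under the lock ... */
	return 0;
}
```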
- * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU - * driver implementations to a bus and allow the driver to do - * bus-specific setup * @need_parent_lock: When probing or removing a device on this bus, the * device core should lock the device's parent. * @@ -104,8 +101,6 @@ struct bus_type { const struct dev_pm_ops *pm; - const struct iommu_ops *iommu_ops; - bool need_parent_lock; }; @@ -232,7 +227,7 @@ bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev) int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start, void *data, int (*fn)(struct device_driver *, void *)); -void bus_sort_breadthfirst(struct bus_type *bus, +void bus_sort_breadthfirst(const struct bus_type *bus, int (*compare)(const struct device *a, const struct device *b)); /* diff --git a/include/linux/device/class.h b/include/linux/device/class.h index abf3d3bfb6fe..c576b49c55c2 100644 --- a/include/linux/device/class.h +++ b/include/linux/device/class.h @@ -40,8 +40,6 @@ struct fwnode_handle; * for the devices belonging to the class. Usually tied to * device's namespace. * @pm: The default device power management operations of this class. - * @p: The private data of the driver core, no one other than the - * driver core can touch this. * * A class is a higher-level view of a device that abstracts out low-level * implementation details. Drivers may see a SCSI disk or an ATA disk, but, diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 3f31baa3293f..8ff4add71f88 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -343,16 +343,19 @@ struct dma_buf { /** * @exp_name: * - * Name of the exporter; useful for debugging. See the - * DMA_BUF_SET_NAME IOCTL. + * Name of the exporter; useful for debugging. Must not be NULL. */ const char *exp_name; /** * @name: * - * Userspace-provided name; useful for accounting and debugging, - * protected by dma_resv_lock() on @resv and @name_lock for read access. + * Userspace-provided name. Default value is NULL. If not NULL, + * length cannot be longer than DMA_BUF_NAME_LEN, including the NUL + * char. Useful for accounting and debugging.
Read/Write accesses + * are protected by @name_lock + * + * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B */ const char *name; diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 18aade195884..3eb3589ff43e 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -21,7 +21,6 @@ struct bus_dma_region { phys_addr_t cpu_start; dma_addr_t dma_start; u64 size; - u64 offset; }; static inline dma_addr_t translate_phys_to_dma(struct device *dev, @@ -29,9 +28,12 @@ static inline dma_addr_t translate_phys_to_dma(struct device *dev, { const struct bus_dma_region *m; - for (m = dev->dma_range_map; m->size; m++) - if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size) - return (dma_addr_t)paddr - m->offset; + for (m = dev->dma_range_map; m->size; m++) { + u64 offset = paddr - m->cpu_start; + + if (paddr >= m->cpu_start && offset < m->size) + return m->dma_start + offset; + } /* make sure dma_capable fails when no translation is available */ return DMA_MAPPING_ERROR; @@ -42,9 +44,12 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev, { const struct bus_dma_region *m; - for (m = dev->dma_range_map; m->size; m++) - if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size) - return (phys_addr_t)dma_addr + m->offset; + for (m = dev->dma_range_map; m->size; m++) { + u64 offset = dma_addr - m->dma_start; + + if (dma_addr >= m->dma_start && offset < m->size) + return m->cpu_start + offset; + } return (phys_addr_t)-1; } diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 0d678e9a7b24..e06bad467f55 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -21,6 +21,7 @@ #include <linux/sched.h> #include <linux/printk.h> #include <linux/rcupdate.h> +#include <linux/timekeeping.h> struct dma_fence; struct dma_fence_ops; @@ -499,6 +500,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1, } /** + * dma_fence_is_later_or_same - return true if f1 is later or same as f2 + * @f1: the first fence from the same context + * @f2: the second fence from the same context + * + * Returns true if f1 is chronologically later than f2 or the same fence. Both + * fences must be from the same context, since a seqno is not re-used across + * contexts. + */ +static inline bool dma_fence_is_later_or_same(struct dma_fence *f1, + struct dma_fence *f2) +{ + return f1 == f2 || dma_fence_is_later(f1, f2); +} + +/** * dma_fence_later - return the chronologically later fence * @f1: the first fence from the same context * @f2: the second fence from the same context @@ -568,6 +584,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence, fence->error = error; } +/** + * dma_fence_timestamp - helper to get the completion timestamp of a fence + * @fence: fence to get the timestamp from. + * + * After a fence is signaled the timestamp is updated with the signaling time, + * but setting the timestamp can race with tasks waiting for the signaling. This + * helper busy waits for the correct timestamp to appear. 
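To see why caching an offset in struct bus_dma_region became unnecessary, a worked example of the reworked translate_phys_to_dma() arithmetic above; the addresses are invented:

```c
#include <linux/dma-direct.h>
#include <linux/sizes.h>

/* One window: CPU 0x80000000..0x8fffffff maps to bus addresses from 0x0. */
static const struct bus_dma_region example_map[] = {
	{ .cpu_start = 0x80000000, .dma_start = 0x0, .size = SZ_256M },
	{ /* sentinel: .size == 0 ends the walk */ }
};

/* For paddr 0x80100000: offset = paddr - cpu_start = 0x100000, which is
 * < SZ_256M, so dma_addr = dma_start + offset = 0x00100000. The reverse
 * helper does the symmetric computation, so no cached offset is needed.
 */
```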
+ */ +static inline ktime_t dma_fence_timestamp(struct dma_fence *fence) +{ + if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) + return ktime_get(); + + while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) + cpu_relax(); + + return fence->timestamp; +} + signed long dma_fence_wait_timeout(struct dma_fence *, bool intr, signed long timeout); signed long dma_fence_wait_any_timeout(struct dma_fence **fences, diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index f2fc203fb8a1..4abc60f04209 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -11,6 +11,7 @@ #include <linux/slab.h> struct cma; +struct iommu_ops; /* * Values for struct dma_map_ops.flags: @@ -426,10 +427,10 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg, #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu, bool coherent); + bool coherent); #else static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size, const struct iommu_ops *iommu, bool coherent) + u64 size, bool coherent) { } #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */ @@ -443,10 +444,10 @@ static inline void arch_teardown_dma_ops(struct device *dev) #endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */ #ifdef CONFIG_DMA_API_DEBUG -void dma_debug_add_bus(struct bus_type *bus); +void dma_debug_add_bus(const struct bus_type *bus); void debug_dma_dump_mappings(struct device *dev); #else -static inline void dma_debug_add_bus(struct bus_type *bus) +static inline void dma_debug_add_bus(const struct bus_type *bus) { } static inline void debug_dma_dump_mappings(struct device *dev) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index f0ccca16a0ac..4a658de44ee9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -144,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev); int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); +bool dma_addressing_limited(struct device *dev); size_t dma_max_mapping_size(struct device *dev); size_t dma_opt_mapping_size(struct device *dev); bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); @@ -264,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev) { return 0; } +static inline bool dma_addressing_limited(struct device *dev) +{ + return false; +} static inline size_t dma_max_mapping_size(struct device *dev) { return 0; @@ -465,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) return dma_set_mask_and_coherent(dev, mask); } -/** - * dma_addressing_limited - return if the device is addressing limited - * @dev: device to check - * - * Return %true if the devices DMA mask is too small to address all memory in - * the system, else %false. Lack of addressing bits is the prime reason for - * bounce buffering, but might not be the only one. 
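A sketch of the kind of caller the now out-of-line dma_addressing_limited() serves (its old inline body is removed in the next hunk); the queue-depth policy is invented:

```c
#include <linux/dma-mapping.h>
#include <linux/minmax.h>

static void my_tune_queue_depth(struct device *dev, unsigned int *depth)
{
	/* bounce buffering is likely; keep fewer requests in flight */
	if (dma_addressing_limited(dev))
		*depth = min(*depth, 64U);
}
```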
- */ -static inline bool dma_addressing_limited(struct device *dev) -{ - return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < - dma_get_required_mask(dev); -} - static inline unsigned int dma_get_max_seg_size(struct device *dev) { if (dev->dma_parms && dev->dma_parms->max_segment_size) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c3656e590213..752dbde4cec1 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -517,8 +517,6 @@ static inline const char *dma_chan_name(struct dma_chan *chan) return dev_name(&chan->dev->device); } -void dma_chan_cleanup(struct kref *kref); - /** * typedef dma_filter_fn - callback filter for dma_request_channel * @chan: channel to be reviewed @@ -955,7 +953,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan, static inline bool is_slave_direction(enum dma_transfer_direction direction) { - return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM); + return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) || + (direction == DMA_DEV_TO_DEV); } static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( diff --git a/include/linux/dpll.h b/include/linux/dpll.h new file mode 100644 index 000000000000..9cf896ea1d41 --- /dev/null +++ b/include/linux/dpll.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates + * Copyright (c) 2023 Intel and affiliates + */ + +#ifndef __DPLL_H__ +#define __DPLL_H__ + +#include <uapi/linux/dpll.h> +#include <linux/device.h> +#include <linux/netlink.h> + +struct dpll_device; +struct dpll_pin; + +struct dpll_device_ops { + int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv, + enum dpll_mode *mode, struct netlink_ext_ack *extack); + int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv, + enum dpll_lock_status *status, + struct netlink_ext_ack *extack); + int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv, + s32 *temp, struct netlink_ext_ack *extack); +}; + +struct dpll_pin_ops { + int (*frequency_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const u64 frequency, + struct netlink_ext_ack *extack); + int (*frequency_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack); + int (*direction_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const enum dpll_pin_direction direction, + struct netlink_ext_ack *extack); + int (*direction_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack); + int (*state_on_pin_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *parent_pin, + void *parent_pin_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack); + int (*state_on_dpll_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, enum dpll_pin_state *state, + struct netlink_ext_ack *extack); + int (*state_on_pin_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *parent_pin, + void *parent_pin_priv, + const enum dpll_pin_state state, + struct netlink_ext_ack *extack); + int (*state_on_dpll_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, + const enum dpll_pin_state 
state, + struct netlink_ext_ack *extack); + int (*prio_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u32 *prio, struct netlink_ext_ack *extack); + int (*prio_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const u32 prio, struct netlink_ext_ack *extack); + int (*phase_offset_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s64 *phase_offset, + struct netlink_ext_ack *extack); + int (*phase_adjust_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 *phase_adjust, + struct netlink_ext_ack *extack); + int (*phase_adjust_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const s32 phase_adjust, + struct netlink_ext_ack *extack); + int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s64 *ffo, struct netlink_ext_ack *extack); +}; + +struct dpll_pin_frequency { + u64 min; + u64 max; +}; + +#define DPLL_PIN_FREQUENCY_RANGE(_min, _max) \ + { \ + .min = _min, \ + .max = _max, \ + } + +#define DPLL_PIN_FREQUENCY(_val) DPLL_PIN_FREQUENCY_RANGE(_val, _val) +#define DPLL_PIN_FREQUENCY_1PPS \ + DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_1_HZ) +#define DPLL_PIN_FREQUENCY_10MHZ \ + DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_MHZ) +#define DPLL_PIN_FREQUENCY_IRIG_B \ + DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_KHZ) +#define DPLL_PIN_FREQUENCY_DCF77 \ + DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_77_5_KHZ) + +struct dpll_pin_phase_adjust_range { + s32 min; + s32 max; +}; + +struct dpll_pin_properties { + const char *board_label; + const char *panel_label; + const char *package_label; + enum dpll_pin_type type; + unsigned long capabilities; + u32 freq_supported_num; + struct dpll_pin_frequency *freq_supported; + struct dpll_pin_phase_adjust_range phase_range; +}; + +#if IS_ENABLED(CONFIG_DPLL) +size_t dpll_msg_pin_handle_size(struct dpll_pin *pin); +int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin); +#else +static inline size_t dpll_msg_pin_handle_size(struct dpll_pin *pin) +{ + return 0; +} + +static inline int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin) +{ + return 0; +} +#endif + +struct dpll_device * +dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module); + +void dpll_device_put(struct dpll_device *dpll); + +int dpll_device_register(struct dpll_device *dpll, enum dpll_type type, + const struct dpll_device_ops *ops, void *priv); + +void dpll_device_unregister(struct dpll_device *dpll, + const struct dpll_device_ops *ops, void *priv); + +struct dpll_pin * +dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module, + const struct dpll_pin_properties *prop); + +int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin, + const struct dpll_pin_ops *ops, void *priv); + +void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin, + const struct dpll_pin_ops *ops, void *priv); + +void dpll_pin_put(struct dpll_pin *pin); + +int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin, + const struct dpll_pin_ops *ops, void *priv); + +void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin, + const struct dpll_pin_ops *ops, void *priv); + +int dpll_device_change_ntf(struct dpll_device *dpll); + +int dpll_pin_change_ntf(struct dpll_pin *pin); + +#endif diff --git 
a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index c177322f793d..b9dd35d4b8f5 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -28,7 +28,7 @@ /* Source and Destination MAC of follow-up meta frames. * Whereas the choice of SMAC only affects the unique identification of the * switch as sender of meta frames, the DMAC must be an address that is present - * in the DSA master port's multicast MAC filter. + * in the DSA conduit port's multicast MAC filter. * 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588 * over L2 use this address for some purpose already. */ diff --git a/include/linux/edac.h b/include/linux/edac.h index fa4bda2a70f6..b4ee8961e623 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -30,7 +30,7 @@ struct device; extern int edac_op_state; -struct bus_type *edac_get_sysfs_subsys(void); +const struct bus_type *edac_get_sysfs_subsys(void); static inline void opstate_init(void) { @@ -187,6 +187,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_NVDIMM: Non-volatile RAM * @MEM_WIO2: Wide I/O 2. * @MEM_HBM2: High bandwidth Memory Gen 2. + * @MEM_HBM3: High bandwidth Memory Gen 3. */ enum mem_type { MEM_EMPTY = 0, @@ -218,6 +219,7 @@ enum mem_type { MEM_NVDIMM, MEM_WIO2, MEM_HBM2, + MEM_HBM3, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -248,6 +250,7 @@ enum mem_type { #define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) #define MEM_FLAG_WIO2 BIT(MEM_WIO2) #define MEM_FLAG_HBM2 BIT(MEM_HBM2) +#define MEM_FLAG_HBM3 BIT(MEM_HBM3) /** * enum edac_type - Error Detection and Correction capabilities and mode @@ -492,7 +495,7 @@ struct edac_raw_error_desc { */ struct mem_ctl_info { struct device dev; - struct bus_type *bus; + const struct bus_type *bus; struct list_head link; /* for global list of mem_ctl_info structs */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 80b21d1c6eaf..c74f47711f0b 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -40,6 +40,7 @@ struct screen_info; #define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1))) #define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) +#define EFI_ACCESS_DENIED (15 | (1UL << (BITS_PER_LONG-1))) #define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1))) #define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1))) #define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1))) @@ -358,13 +359,10 @@ void efi_native_runtime_setup(void); * where the UEFI SPEC breaks the line. 
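Further down in the efi.h hunk, a notifier head (efivar_ops_nh) and the EFIVAR_OPS_* event types are added; a sketch of a listener, with everything beyond those names invented:

```c
#include <linux/efi.h>
#include <linux/notifier.h>

static int my_efivar_ops_event(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	if (event == EFIVAR_OPS_RDWR)
		pr_info("efivars: variable store is writable\n");
	return NOTIFY_OK;
}

static struct notifier_block my_efivar_nb = {
	.notifier_call = my_efivar_ops_event,
};

/* during init:
 *	blocking_notifier_chain_register(&efivar_ops_nh, &my_efivar_nb);
 */
```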
*/ #define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) -#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) -#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) -#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) #define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) #define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) #define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) @@ -851,10 +849,6 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len) return 1; } -#ifdef CONFIG_EFI_PCDP -extern int __init efi_setup_pcdp_console(char *); -#endif - /* * We play games with efi_enabled so that the compiler will, if * possible, remove EFI-related code altogether. @@ -1355,4 +1349,15 @@ bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table) umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n); +/* + * efivar ops event type + */ +#define EFIVAR_OPS_RDONLY 0 +#define EFIVAR_OPS_RDWR 1 + +extern struct blocking_notifier_head efivar_ops_nh; + +void efivars_generic_ops_register(void); +void efivars_generic_ops_unregister(void); + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index b9caa01dfac4..88d91e087471 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -224,7 +224,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util, unsigned long allowed_cpu_cap) { - unsigned long freq, scale_cpu; + unsigned long freq, ref_freq, scale_cpu; struct em_perf_state *ps; int cpu; @@ -241,11 +241,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, */ cpu = cpumask_first(to_cpumask(pd->cpus)); scale_cpu = arch_scale_cpu_capacity(cpu); - ps = &pd->table[pd->nr_perf_states - 1]; + ref_freq = arch_scale_freq_ref(cpu); - max_util = map_util_perf(max_util); max_util = min(max_util, allowed_cpu_cap); - freq = map_util_freq(max_util, ps->frequency, scale_cpu); + freq = map_util_freq(max_util, ref_freq, scale_cpu); /* * Find the lowest performance state of the Energy Model above the diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index d95ab85f96ba..b0fb775a600d 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -7,6 +7,11 @@ #include <linux/syscalls.h> #include <linux/seccomp.h> #include <linux/sched.h> +#include <linux/context_tracking.h> +#include <linux/livepatch.h> +#include <linux/resume_user_mode.h> +#include <linux/tick.h> +#include <linux/kmsan.h> #include <asm/entry-common.h> @@ -98,7 +103,19 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {} * done between 
establishing state and enabling interrupts. The caller must * enable interrupts before invoking syscall_enter_from_user_mode_work(). */ -void enter_from_user_mode(struct pt_regs *regs); +static __always_inline void enter_from_user_mode(struct pt_regs *regs) +{ + arch_enter_from_user_mode(regs); + lockdep_hardirqs_off(CALLER_ADDR0); + + CT_WARN_ON(__ct_state() != CONTEXT_USER); + user_exit_irqoff(); + + instrumentation_begin(); + kmsan_unpoison_entry_regs(regs); + trace_hardirqs_off_finish(); + instrumentation_end(); +} /** * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts @@ -117,6 +134,9 @@ void enter_from_user_mode(struct pt_regs *regs); */ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs); +long syscall_trace_enter(struct pt_regs *regs, long syscall, + unsigned long work); + /** * syscall_enter_from_user_mode_work - Check and handle work before invoking * a syscall @@ -140,7 +160,15 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs); * ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter() * 2) Invocation of audit_syscall_entry() */ -long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall); +static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall) +{ + unsigned long work = READ_ONCE(current_thread_info()->syscall_work); + + if (work & SYSCALL_WORK_ENTER) + syscall = syscall_trace_enter(regs, syscall, work); + + return syscall; +} /** * syscall_enter_from_user_mode - Establish state and check and handle work @@ -159,7 +187,19 @@ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall); * Returns: The original or a modified syscall number. See * syscall_enter_from_user_mode_work() for further explanation. 
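A simplified sketch of how an architecture's syscall path strings the now-inline entry/exit helpers together; dispatch details vary per arch and are elided here:

```c
#include <linux/entry-common.h>

/* Hypothetical arch glue; real implementations also read the syscall
 * number from regs and handle restart/errno conventions.
 */
static void my_arch_do_syscall(struct pt_regs *regs, long nr)
{
	nr = syscall_enter_from_user_mode(regs, nr);

	/* ... dispatch sys_call_table[nr] and store its return value ... */

	syscall_exit_to_user_mode(regs);
}
```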
*/ -long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall); +static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall) +{ + long ret; + + enter_from_user_mode(regs); + + instrumentation_begin(); + local_irq_enable(); + ret = syscall_enter_from_user_mode_work(regs, syscall); + instrumentation_end(); + + return ret; +} /** * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable() @@ -259,6 +299,43 @@ static __always_inline void arch_exit_to_user_mode(void) { } void arch_do_signal_or_restart(struct pt_regs *regs); /** + * exit_to_user_mode_loop - do any pending work before leaving to user space + */ +unsigned long exit_to_user_mode_loop(struct pt_regs *regs, + unsigned long ti_work); + +/** + * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required + * @regs: Pointer to pt_regs on entry stack + * + * 1) check that interrupts are disabled + * 2) call tick_nohz_user_enter_prepare() + * 3) call exit_to_user_mode_loop() if any flags from + * EXIT_TO_USER_MODE_WORK are set + * 4) check that interrupts are still disabled + */ +static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs) +{ + unsigned long ti_work; + + lockdep_assert_irqs_disabled(); + + /* Flush pending rcuog wakeup before the last need_resched() check */ + tick_nohz_user_enter_prepare(); + + ti_work = read_thread_flags(); + if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK)) + ti_work = exit_to_user_mode_loop(regs, ti_work); + + arch_exit_to_user_mode_prepare(regs, ti_work); + + /* Ensure that kernel state is sane for a return to userspace */ + kmap_assert_nomap(); + lockdep_assert_irqs_disabled(); + lockdep_sys_exit(); +} + +/** * exit_to_user_mode - Fixup state when exiting to user mode * * Syscall/interrupt exit enables interrupts, but the kernel state is @@ -276,7 +353,17 @@ void arch_do_signal_or_restart(struct pt_regs *regs); * non-instrumentable. * The caller has to invoke syscall_exit_to_user_mode_work() before this. */ -void exit_to_user_mode(void); +static __always_inline void exit_to_user_mode(void) +{ + instrumentation_begin(); + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(); + instrumentation_end(); + + user_enter_irqoff(); + arch_exit_to_user_mode(); + lockdep_hardirqs_on(CALLER_ADDR0); +} /** * syscall_exit_to_user_mode_work - Handle work before returning to user mode diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 62b61527bcc4..325e0778e937 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -95,6 +95,7 @@ struct kernel_ethtool_ringparam { * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len + * @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capture for setting tcp_data_split */ enum ethtool_supported_ring_param { ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), @@ -102,6 +103,7 @@ enum ethtool_supported_ring_param { ETHTOOL_RING_USE_TX_PUSH = BIT(2), ETHTOOL_RING_USE_RX_PUSH = BIT(3), ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4), + ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5), }; #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) @@ -409,8 +411,10 @@ struct ethtool_pause_stats { * not entire FEC data blocks. This is a non-standard statistic. * Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS. 
* - * @lane: per-lane/PCS-instance counts as defined by the standard - * @total: error counts for the entire port, for drivers incapable of reporting + * For each of the above fields, the two substructure members are: + * + * - @lanes: per-lane/PCS-instance counts as defined by the standard + * - @total: error counts for the entire port, for drivers incapable of reporting * per-lane stats * * Drivers should fill in either only total or per-lane statistics, core @@ -595,9 +599,46 @@ struct ethtool_mm_stats { }; /** + * struct ethtool_rxfh_param - RXFH (RSS) parameters + * @hfunc: Defines the current RSS hash function used by HW (or to be set to). + * Valid values are one of the %ETH_RSS_HASH_*. + * @indir_size: On SET, the array size of the user buffer for the + * indirection table, which may be zero, or + * %ETH_RXFH_INDIR_NO_CHANGE. On GET (read from the driver), + * the array size of the hardware indirection table. + * @indir: The indirection table of size @indir_size entries. + * @key_size: On SET, the array size of the user buffer for the hash key, + * which may be zero. On GET (read from the driver), the size of the + * hardware hash key. + * @key: The hash key of size @key_size bytes. + * @rss_context: RSS context identifier. Context 0 is the default for normal + * traffic; other contexts can be referenced as the destination for RX flow + * classification rules. On SET, %ETH_RXFH_CONTEXT_ALLOC is used + * to allocate a new RSS context; on return this field will + * contain the ID of the newly allocated context. + * @rss_delete: Set to non-ZERO to remove the @rss_context context. + * @input_xfrm: Defines how the input data is transformed. Valid values are one + * of %RXH_XFRM_*. + */ +struct ethtool_rxfh_param { + u8 hfunc; + u32 indir_size; + u32 *indir; + u32 key_size; + u8 *key; + u32 rss_context; + u8 rss_delete; + u8 input_xfrm; +}; + +/** * struct ethtool_ops - optional netdev operations * @cap_link_lanes_supported: indicates if the driver supports lanes * parameter. + * @cap_rss_ctx_supported: indicates if the driver supports RSS + * contexts. + * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor + * RSS. * @supported_coalesce_params: supported types of interrupt coalescing. * @supported_ring_params: supported ring params. * @get_drvinfo: Report driver/device information. Modern drivers no @@ -694,15 +735,6 @@ struct ethtool_mm_stats { * will remain unchanged. * Returns a negative error code or zero. An error code must be returned * if at least one unsupported change was requested. - * @get_rxfh_context: Get the contents of the RX flow hash indirection table, - * hash key, and/or hash function assiciated to the given rss context. - * Returns a negative error code or zero. - * @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting - * the contents of the RX flow hash indirection table, hash key, and/or - * hash function associated to the given context. Arguments which are set - * to %NULL or zero will remain unchanged. - * Returns a negative error code or zero. An error code must be returned - * if at least one unsupported change was requested. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero. 
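With the two _context callbacks folded away, a driver implements a single get/set pair against struct ethtool_rxfh_param. A hypothetical driver sketch; the foo_* names and priv fields are invented:

static int foo_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
{
	struct foo_priv *priv = netdev_priv(dev);

	rxfh->hfunc = ETH_RSS_HASH_TOP;
	if (rxfh->indir)
		memcpy(rxfh->indir, priv->rss_indir,
		       priv->indir_size * sizeof(*rxfh->indir));
	if (rxfh->key)
		memcpy(rxfh->key, priv->rss_key, priv->key_size);
	return 0;
}

static int foo_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
		NL_SET_ERR_MSG_MOD(extack, "only Toeplitz hashing is supported");
		return -EOPNOTSUPP;
	}
	/* apply rxfh->indir / rxfh->key when non-NULL ... */
	return 0;
}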
@@ -785,6 +817,8 @@ struct ethtool_mm_stats { */ struct ethtool_ops { u32 cap_link_lanes_supported:1; + u32 cap_rss_ctx_supported:1; + u32 cap_rss_sym_xor_supported:1; u32 supported_coalesce_params; u32 supported_ring_params; void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); @@ -844,15 +878,9 @@ struct ethtool_ops { int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); - int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, - u8 *hfunc); - int (*set_rxfh)(struct net_device *, const u32 *indir, - const u8 *key, const u8 hfunc); - int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key, - u8 *hfunc, u32 rss_context); - int (*set_rxfh_context)(struct net_device *, const u32 *indir, - const u8 *key, const u8 hfunc, - u32 *rss_context, bool delete); + int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); + int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, + struct netlink_ext_ack *extack); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); @@ -1044,12 +1072,52 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add, } /** + * ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer. + * @dev: pointer to net_device structure + * @info: buffer to hold the result + * Returns zero on success, non-zero otherwise. + */ +int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info); + +/** * ethtool_sprintf - Write formatted string to ethtool string data - * @data: Pointer to start of string to update + * @data: Pointer to a pointer to the start of string to update * @fmt: Format of string to write * - * Write formatted string to data. Update data to point at start of + * Write formatted string to *data. Update *data to point at start of * next string. */ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...); + +/** + * ethtool_puts - Write string to ethtool string data + * @data: Pointer to a pointer to the start of string to update + * @str: String to write + * + * Write string to *data without a trailing newline. Update *data + * to point at start of next string. + * + * Prefer this function to ethtool_sprintf() when given only + * two arguments or if @fmt is just "%s". 
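A sketch of the intended division of labour in a strings callback, assuming a hypothetical foo driver with FOO_NUM_QUEUES queues:

static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	static const char * const foo_stats[] = { "rx_ok", "tx_ok" };
	int i;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(foo_stats); i++)
		ethtool_puts(&data, foo_stats[i]);	/* literal, no format parsing */
	for (i = 0; i < FOO_NUM_QUEUES; i++)
		ethtool_sprintf(&data, "q%d_drops", i);	/* needs formatting */
}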
+ */ +extern void ethtool_puts(u8 **data, const char *str); + +/* Link mode to forced speed capabilities maps */ +struct ethtool_forced_speed_map { + u32 speed; + __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); + + const u32 *cap_arr; + u32 arr_size; +}; + +#define ETHTOOL_FORCED_SPEED_MAP(prefix, value) \ +{ \ + .speed = SPEED_##value, \ + .cap_arr = prefix##_##value, \ + .arr_size = ARRAY_SIZE(prefix##_##value), \ +} + +void +ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size); #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index b9d83652c097..e32bee4345fb 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx); struct file *eventfd_fget(int fd); struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); -__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); -__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask); +void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask); int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt); void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); @@ -58,15 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) return ERR_PTR(-ENOSYS); } -static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n) +static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask) { - return -ENOSYS; -} - -static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, - unsigned mask) -{ - return -ENOSYS; } static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) @@ -92,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) #endif +static inline void eventfd_signal(struct eventfd_ctx *ctx) +{ + eventfd_signal_mask(ctx, 0); +} + #endif /* _LINUX_EVENTFD_H */ diff --git a/include/linux/evm.h b/include/linux/evm.h index 01fc495a83e2..36ec884320d9 100644 --- a/include/linux/evm.h +++ b/include/linux/evm.h @@ -31,6 +31,7 @@ extern void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len); +extern int evm_inode_copy_up_xattr(const char *name); extern int evm_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *xattr_name); extern void evm_inode_post_removexattr(struct dentry *dentry, @@ -117,6 +118,11 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry, return; } +static inline int evm_inode_copy_up_xattr(const char *name) +{ + return 0; +} + static inline int evm_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *xattr_name) diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h index 1c849db953a5..d445705ac13c 100644 --- a/include/linux/export-internal.h +++ b/include/linux/export-internal.h @@ -16,10 +16,13 @@ * and eliminates the need for absolute relocations that require runtime * processing on relocatable kernels. */ +#define __KSYM_ALIGN ".balign 4" #define __KSYM_REF(sym) ".long " #sym "- ." 
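For the eventfd change above, callers migrate mechanically; a sketch:

static void sketch_notify(struct eventfd_ctx *ctx)
{
	/* Old API: eventfd_signal(ctx, 1) - the count was in practice always 1. */
	eventfd_signal(ctx);	/* new API: the counter is bumped by exactly one */
}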
#elif defined(CONFIG_64BIT) +#define __KSYM_ALIGN ".balign 8" #define __KSYM_REF(sym) ".quad " #sym #else +#define __KSYM_ALIGN ".balign 4" #define __KSYM_REF(sym) ".long " #sym #endif @@ -42,7 +45,7 @@ " .asciz \"" ns "\"" "\n" \ " .previous" "\n" \ " .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \ - " .balign 4" "\n" \ + __KSYM_ALIGN "\n" \ "__ksymtab_" #name ":" "\n" \ __KSYM_REF(sym) "\n" \ __KSYM_REF(__kstrtab_ ##name) "\n" \ @@ -50,8 +53,8 @@ " .previous" "\n" \ ) -#ifdef CONFIG_IA64 -#define KSYM_FUNC(name) @fptr(name) +#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT) +#define KSYM_FUNC(name) P%name #else #define KSYM_FUNC(name) name #endif @@ -61,6 +64,7 @@ #define SYMBOL_CRC(sym, crc, sec) \ asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \ + ".balign 4" "\n" \ "__crc_" #sym ":" "\n" \ ".long " #crc "\n" \ ".previous" "\n") diff --git a/include/linux/export.h b/include/linux/export.h index beed8387e0a4..0bbd02fd351d 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -7,15 +7,6 @@ #include <linux/stringify.h> /* - * Export symbols from the kernel to modules. Forked from module.h - * to reduce the amount of pointless cruft we feed to gcc when only - * exporting a simple symbol or two. - * - * Try not to add #includes here. It slows compilation and makes kernel - * hackers place grumpy comments in header files. - */ - -/* * This comment block is used by fixdep. Please do not remove. * * When CONFIG_MODVERSIONS is changed from n to y, all source files having @@ -23,15 +14,6 @@ * side effect of the *.o build rule. */ -#ifndef __ASSEMBLY__ -#ifdef MODULE -extern struct module __this_module; -#define THIS_MODULE (&__this_module) -#else -#define THIS_MODULE ((struct module *)0) -#endif -#endif /* __ASSEMBLY__ */ - #ifdef CONFIG_64BIT #define __EXPORT_SYMBOL_REF(sym) \ .balign 8 ASM_NL \ @@ -50,7 +32,7 @@ extern struct module __this_module; __EXPORT_SYMBOL_REF(sym) ASM_NL \ .previous -#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS) +#if defined(__DISABLE_EXPORTS) /* * Allow symbol exports to be disabled completely so that C code may @@ -75,7 +57,7 @@ extern struct module __this_module; __ADDRESSABLE(sym) \ asm(__stringify(___EXPORT_SYMBOL(sym, license, ns))) -#endif /* CONFIG_MODULES */ +#endif #ifdef DEFAULT_SYMBOL_NAMESPACE #define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE)) diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 11fbd0ee1370..bb37ad5cc954 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -99,12 +99,29 @@ enum fid_type { FILEID_FAT_WITH_PARENT = 0x72, /* + * 64 bit inode number, 32 bit generation number. + */ + FILEID_INO64_GEN = 0x81, + + /* + * 64 bit inode number, 32 bit generation number, + * 64 bit parent inode number, 32 bit parent generation. 
+ */ + FILEID_INO64_GEN_PARENT = 0x82, + + /* * 128 bit child FID (struct lu_fid) * 128 bit parent FID (struct lu_fid) */ FILEID_LUSTRE = 0x97, /* + * 64 bit inode number, 32 bit subvolume, 32 bit generation number: + */ + FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1, + FILEID_BCACHEFS_WITH_PARENT = 0xb2, + + /* * 64 bit unique kernfs id */ FILEID_KERNFS = 0xfe, @@ -123,7 +140,11 @@ struct fid { u32 parent_ino; u32 parent_gen; } i32; - struct { + struct { + u64 ino; + u32 gen; + } __packed i64; + struct { u32 block; u16 partref; u16 parent_partref; @@ -224,15 +245,56 @@ struct export_operations { atomic attribute updates */ #define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */ +#define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */ unsigned long flags; }; +/** + * exportfs_lock_op_is_async() - export op supports async lock operation + * @export_ops: the nfs export operations to check + * + * Returns true if the nfs export_operations structure has + * EXPORT_OP_ASYNC_LOCK in its flags set + */ +static inline bool +exportfs_lock_op_is_async(const struct export_operations *export_ops) +{ + return export_ops->flags & EXPORT_OP_ASYNC_LOCK; +} + extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, int *max_len, struct inode *parent, int flags); extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len, int flags); +static inline bool exportfs_can_encode_fid(const struct export_operations *nop) +{ + return !nop || nop->encode_fh; +} + +static inline bool exportfs_can_decode_fh(const struct export_operations *nop) +{ + return nop && nop->fh_to_dentry; +} + +static inline bool exportfs_can_encode_fh(const struct export_operations *nop, + int fh_flags) +{ + /* + * If a non-decodeable file handle was requested, we only need to make + * sure that the filesystem did not opt out of encoding a fid. + */ + if (fh_flags & EXPORT_FH_FID) + return exportfs_can_encode_fid(nop); + + /* + * If a decodeable file handle was requested, we need to make sure that + * the filesystem can also decode file handles. + */ + return exportfs_can_decode_fh(nop); +} + static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid, int *max_len) { @@ -252,10 +314,12 @@ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, /* * Generic helpers for filesystems.
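A sketch of a name_to_handle-style caller of the new capability helpers, mirroring (not quoting) what fs/fhandle.c does:

static int sketch_encode_handle(struct path *path, struct fid *fid,
				int *max_len, int fh_flags)
{
	/* FID-only handles need encode_fh (or no export ops at all);
	 * decodable handles additionally require fh_to_dentry. */
	if (!exportfs_can_encode_fh(path->dentry->d_sb->s_export_op, fh_flags))
		return -EOPNOTSUPP;

	return exportfs_encode_fh(path->dentry, fid, max_len, fh_flags);
}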
*/ -extern struct dentry *generic_fh_to_dentry(struct super_block *sb, +int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len, + struct inode *parent); +struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); -extern struct dentry *generic_fh_to_parent(struct super_block *sb, +struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index a82a4bb6ce68..053137a0fe45 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -13,10 +13,10 @@ #define F2FS_SUPER_OFFSET 1024 /* byte-size offset */ #define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */ -#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */ -#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */ -#define F2FS_BLKSIZE 4096 /* support only 4KB block */ -#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ +#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */ +#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */ +#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */ +#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ #define F2FS_EXTENSION_LEN 8 /* max size of extension */ #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS) @@ -104,6 +104,7 @@ enum f2fs_error { ERROR_CORRUPTED_VERITY_XATTR, ERROR_CORRUPTED_XATTR, ERROR_INVALID_NODE_REFERENCE, + ERROR_INCONSISTENT_NAT, ERROR_MAX, }; @@ -210,14 +211,14 @@ struct f2fs_checkpoint { unsigned char sit_nat_version_bitmap[]; } __packed; -#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ +#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */ #define CP_MIN_CHKSUM_OFFSET \ (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap)) /* * For orphan inode management */ -#define F2FS_ORPHANS_PER_BLOCK 1020 +#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32)) #define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \ F2FS_ORPHANS_PER_BLOCK) @@ -243,14 +244,31 @@ struct f2fs_extent { #define F2FS_NAME_LEN 255 /* 200 bytes for inline xattrs by default */ #define DEFAULT_INLINE_XATTR_ADDRS 50 -#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ + +#define OFFSET_OF_END_OF_I_EXT 360 +#define SIZE_OF_I_NID 20 + +struct node_footer { + __le32 nid; /* node id */ + __le32 ino; /* inode number */ + __le32 flag; /* include cold/fsync/dentry marks and offset */ + __le64 cp_ver; /* checkpoint version */ + __le32 next_blkaddr; /* next node page block address */ +} __packed; + +/* Address Pointers in an Inode */ +#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \ + - SIZE_OF_I_NID \ + - sizeof(struct node_footer)) / sizeof(__le32)) #define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \ get_extra_isize(inode)) #define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ #define ADDRS_PER_INODE(inode) addrs_per_inode(inode) -#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ +/* Address Pointers in a Direct Block */ +#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32)) #define ADDRS_PER_BLOCK(inode) 
addrs_per_block(inode) -#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ +/* Node IDs in an Indirect Block */ +#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32)) #define ADDRS_PER_PAGE(page, inode) \ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode)) @@ -342,14 +360,6 @@ enum { #define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0) -struct node_footer { - __le32 nid; /* node id */ - __le32 ino; /* inode number */ - __le32 flag; /* include cold/fsync/dentry marks and offset */ - __le64 cp_ver; /* checkpoint version */ - __le32 next_blkaddr; /* next node page block address */ -} __packed; - struct f2fs_node { /* can be one of three types: inode, direct, and indirect types */ union { @@ -363,7 +373,7 @@ struct f2fs_node { /* * For NAT entries */ -#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) +#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry)) struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ @@ -378,12 +388,13 @@ struct f2fs_nat_block { /* * For SIT entries * - * Each segment is 2MB in size by default so that a bitmap for validity of - * there-in blocks should occupy 64 bytes, 512 bits. + * A validity bitmap of 64 bytes covers 512 blocks of area. For a 4K page size, + * this results in a segment size of 2MB. For 16k pages, the default segment size + * is 8MB. * Not allow to change this. */ #define SIT_VBLOCK_MAP_SIZE 64 -#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry)) +#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry)) /* * F2FS uses 4 bytes to represent block address. As a result, supported size of @@ -418,7 +429,7 @@ struct f2fs_sit_block { * For segment summary * * One summary block contains exactly 512 summary entries, which represents - * exactly 2MB segment by default. Not allow to change the basic units. + * exactly one segment by default. Not allow to change the basic units. * * NOTE: For initializing fields, you must use set_summary * @@ -429,12 +440,12 @@ struct f2fs_sit_block { * from node's page's beginning to get a data block address. * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node) */ -#define ENTRIES_IN_SUM 512 -#define SUMMARY_SIZE (7) /* sizeof(struct summary) */ +#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8) +#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */ #define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */ #define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM) -/* a summary entry for a 4KB-sized block in a segment */ +/* a summary entry for a block in a segment */ struct f2fs_summary { __le32 nid; /* parent node id */ union { @@ -518,7 +529,7 @@ struct f2fs_journal { }; } __packed; -/* 4KB-sized summary block structure */ +/* Block-sized summary block structure */ struct f2fs_summary_block { struct f2fs_summary entries[ENTRIES_IN_SUM]; struct f2fs_journal journal; @@ -559,11 +570,14 @@ typedef __le32 f2fs_hash_t; * Note: there are more reserved space in inline dentry than in regular * dentry, when converting inline dentry we should handle this carefully. 
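The constants above are now derived from F2FS_BLKSIZE rather than hard-coded. For the traditional 4 KiB block size they reduce to the old values, which can be checked mechanically; a sketch, valid only when PAGE_SIZE == 4096:

#include <linux/build_bug.h>
#include <linux/f2fs_fs.h>

/* node_footer is 4 + 4 + 4 + 8 + 4 = 24 bytes, __packed. */
static_assert(sizeof(struct node_footer) == 24);
/* (4096 - 360 - 20 - 24) / 4 == 923, the former DEF_ADDRS_PER_INODE. */
static_assert(DEF_ADDRS_PER_INODE == 923);
/* (4096 - 24) / 4 == 1018, the former direct/indirect block counts. */
static_assert(DEF_ADDRS_PER_BLOCK == 1018 && NIDS_PER_BLOCK == 1018);
/* (8 * 4096) / ((11 + 8) * 8 + 1) == 214 dentries per 4 KiB block. */
static_assert(NR_DENTRY_IN_BLOCK == 214);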
*/ -#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */ + +/* the number of dentry in a block */ +#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1)) #define SIZE_OF_DIR_ENTRY 11 /* by byte */ #define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ BITS_PER_BYTE) -#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ +#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \ F2FS_SLOT_LEN) * \ NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) #define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */ @@ -576,7 +590,7 @@ struct f2fs_dir_entry { __u8 file_type; /* file type */ } __packed; -/* 4KB-sized directory entry block */ +/* Block-sized directory entry block */ struct f2fs_dentry_block { /* validity bitmap for directory entries in each block */ __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP]; diff --git a/include/linux/fb.h b/include/linux/fb.h index c14576458228..05dc9624897d 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -536,6 +536,7 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos); +int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma); #define __FB_DEFAULT_IOMEM_OPS_RDWR \ .fb_read = fb_io_read, \ @@ -547,7 +548,7 @@ extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf, .fb_imageblit = cfb_imageblit #define __FB_DEFAULT_IOMEM_OPS_MMAP \ - .fb_mmap = NULL /* default implementation */ + .fb_mmap = fb_io_mmap #define FB_DEFAULT_IOMEM_OPS \ __FB_DEFAULT_IOMEM_OPS_RDWR, \ @@ -591,8 +592,6 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, /* fbmem.c */ extern int register_framebuffer(struct fb_info *fb_info); extern void unregister_framebuffer(struct fb_info *fb_info); -extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); -extern int fb_show_logo(struct fb_info *fb_info, int rotate); extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, u32 height, u32 shift_high, u32 shift_low, u32 mod); @@ -603,9 +602,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var, extern int fb_get_options(const char *name, char **option); extern int fb_new_modelist(struct fb_info *info); -extern bool fb_center_logo; -extern int fb_logo_count; - static inline void lock_fb_info(struct fb_info *info) { mutex_lock(&info->lock); @@ -853,7 +849,10 @@ static inline bool fb_modesetting_disabled(const char *drvname) } #endif -/* Convenience logging macros */ +/* + * Convenience logging macros + */ + #define fb_err(fb_info, fmt, ...) \ pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) #define fb_notice(info, fmt, ...) \ @@ -865,4 +864,12 @@ static inline bool fb_modesetting_disabled(const char *drvname) #define fb_dbg(fb_info, fmt, ...) \ pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_warn_once(fb_info, fmt, ...) \ + pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) + +#define fb_WARN_ONCE(fb_info, condition, fmt, ...) 
\ + WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_WARN_ON_ONCE(fb_info, x) \ + fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")") + #endif /* _LINUX_FB_H */ diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index e066816f3519..78c8326d74ae 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -83,12 +83,17 @@ struct dentry; static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = rcu_dereference_raw(files->fdt); - - if (fd < fdt->max_fds) { - fd = array_index_nospec(fd, fdt->max_fds); - return rcu_dereference_raw(fdt->fd[fd]); - } - return NULL; + unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds); + struct file *needs_masking; + + /* + * 'mask' is zero for an out-of-bounds fd, all ones for ok. + * 'fd&mask' is 'fd' for ok, or 0 for out of bounds. + * + * Accessing fdt->fd[0] is ok, but needs masking of the result. + */ + needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]); + return (struct file *)(mask & (unsigned long)needs_masking); } static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd) @@ -98,20 +103,9 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un return files_lookup_fd_raw(files, fd); } -static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd) -{ - RCU_LOCKDEP_WARN(!rcu_read_lock_held(), - "suspicious rcu_dereference_check() usage"); - return files_lookup_fd_raw(files, fd); -} - -static inline struct file *lookup_fd_rcu(unsigned int fd) -{ - return files_lookup_fd_rcu(current->files, fd); -} - -struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd); -struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd); +struct file *lookup_fdget_rcu(unsigned int fd); +struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd); +struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd); struct task_struct; @@ -125,7 +119,7 @@ int iterate_fd(struct files_struct *, unsigned, extern int close_fd(unsigned int fd); extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); -extern struct file *close_fd_get_file(unsigned int fd); +extern struct file *file_close_fd(unsigned int fd); extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, struct files_struct **new_fdp); diff --git a/include/linux/file.h b/include/linux/file.h index 6e9099d29343..6834a29338c4 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -96,18 +96,8 @@ DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T), extern void fd_install(unsigned int fd, struct file *file); -extern int __receive_fd(struct file *file, int __user *ufd, - unsigned int o_flags); +int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags); -extern int receive_fd(struct file *file, unsigned int o_flags); - -static inline int receive_fd_user(struct file *file, int __user *ufd, - unsigned int o_flags) -{ - if (ufd == NULL) - return -EFAULT; - return __receive_fd(file, ufd, o_flags); -} int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags); extern void flush_delayed_fput(void); diff --git a/include/linux/filter.h b/include/linux/filter.h index 761af6b3cf2b..68fb6c8142fe 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -117,21 +117,25 @@ struct ctl_table_header; /* ALU ops on immediates, 
bpf_add|sub|...: dst_reg += imm32 */ -#define BPF_ALU64_IMM(OP, DST, IMM) \ +#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ - .off = 0, \ + .off = OFF, \ .imm = IMM }) +#define BPF_ALU64_IMM(OP, DST, IMM) \ + BPF_ALU64_IMM_OFF(OP, DST, IMM, 0) -#define BPF_ALU32_IMM(OP, DST, IMM) \ +#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ - .off = 0, \ + .off = OFF, \ .imm = IMM }) +#define BPF_ALU32_IMM(OP, DST, IMM) \ + BPF_ALU32_IMM_OFF(OP, DST, IMM, 0) /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ @@ -143,6 +147,16 @@ struct ctl_table_header; .off = 0, \ .imm = LEN }) +/* Byte Swap, bswap16/32/64 */ + +#define BPF_BSWAP(DST, LEN) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = LEN }) + /* Short form of mov, dst_reg = src_reg */ #define BPF_MOV64_REG(DST, SRC) \ @@ -179,6 +193,24 @@ struct ctl_table_header; .off = 0, \ .imm = IMM }) +/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */ + +#define BPF_MOVSX64_REG(DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +#define BPF_MOVSX32_REG(DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + /* Special form of mov32, used for doing explicit zero extension on dst. */ #define BPF_ZEXT_REG(DST) \ ((struct bpf_insn) { \ @@ -263,6 +295,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = OFF, \ .imm = 0 }) +/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */ + +#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + /* Memory store, *(uint *) (dst_reg + off16) = src_reg */ #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ @@ -694,7 +736,7 @@ static inline void bpf_compute_and_save_data_end( cb->data_end = skb->data + skb_headlen(skb); } -/* Restore data saved by bpf_compute_data_pointers(). */ +/* Restore data saved by bpf_compute_and_save_data_end(). */ static inline void bpf_restore_data_end( struct sk_buff *skb, void *saved_data_end) { @@ -912,6 +954,8 @@ bool bpf_jit_needs_zext(void); bool bpf_jit_supports_subprog_tailcalls(void); bool bpf_jit_supports_kfunc_call(void); bool bpf_jit_supports_far_kfunc_call(void); +bool bpf_jit_supports_exceptions(void); +void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); bool bpf_helper_changes_pkt_data(void *func); static inline bool bpf_dump_raw_ok(const struct cred *cred) @@ -981,12 +1025,6 @@ int xdp_do_redirect_frame(struct net_device *dev, struct bpf_prog *prog); void xdp_do_flush(void); -/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as - * it is no longer only flushing maps. Keep this define for compatibility - * until all drivers are updated - do not use xdp_do_flush_map() in new code! 
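The new macros above cover the BPF "cpu v4" instructions. A sketch of hand-built instructions using them; the register choices and offsets are arbitrary:

struct bpf_insn sketch[] = {
	/* r2 = *(s16 *)(r1 + 8): sign-extending load */
	BPF_LDX_MEMSX(BPF_H, BPF_REG_2, BPF_REG_1, 8),
	/* r3 = (s8)r2: sign-extend the low 8 bits of r2 (off selects 8/16/32) */
	BPF_MOVSX64_REG(BPF_REG_3, BPF_REG_2, 8),
	/* r3 = bswap64(r3) */
	BPF_BSWAP(BPF_REG_3, 64),
	BPF_EXIT_INSN(),
};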
- */ -#define xdp_do_flush_map xdp_do_flush - void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act); #ifdef CONFIG_INET @@ -1029,7 +1067,7 @@ struct bpf_binary_header * bpf_jit_binary_pack_hdr(const struct bpf_prog *fp); void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns); -void bpf_prog_pack_free(struct bpf_binary_header *hdr); +void bpf_prog_pack_free(void *ptr, u32 size); static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) { @@ -1127,6 +1165,7 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, bool is_bpf_text_address(unsigned long addr); int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym); +struct bpf_prog *bpf_prog_ksym_find(unsigned long addr); static inline const char * bpf_address_lookup(unsigned long addr, unsigned long *size, @@ -1194,6 +1233,11 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, return -ERANGE; } +static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) +{ + return NULL; +} + static inline const char * bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) @@ -1285,6 +1329,7 @@ struct bpf_sock_addr_kern { */ u64 tmp_reg; void *t_ctx; /* Attach type specific context. */ + u32 uaddrlen; }; struct bpf_sock_ops_kern { diff --git a/include/linux/firewire.h b/include/linux/firewire.h index bd3fc75d4f14..dd9f2d765e68 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -75,7 +75,7 @@ void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p); int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value); int fw_csr_string(const u32 *directory, int key, char *buf, size_t size); -extern struct bus_type fw_bus_type; +extern const struct bus_type fw_bus_type; struct fw_card_driver; struct fw_node; diff --git a/include/linux/firmware.h b/include/linux/firmware.h index de7fea3bca51..0311858b46ce 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -27,6 +27,7 @@ struct firmware { * @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size * @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log * @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry + * @FW_UPLOAD_ERR_FW_INVALID: invalid firmware file * @FW_UPLOAD_ERR_MAX: Maximum error code marker */ enum fw_upload_err { @@ -38,6 +39,7 @@ enum fw_upload_err { FW_UPLOAD_ERR_INVALID_SIZE, FW_UPLOAD_ERR_RW_ERROR, FW_UPLOAD_ERR_WEAROUT, + FW_UPLOAD_ERR_FW_INVALID, FW_UPLOAD_ERR_MAX }; diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h index 95b0da2326a9..8eaf8922ab02 100644 --- a/include/linux/firmware/meson/meson_sm.h +++ b/include/linux/firmware/meson/meson_sm.h @@ -19,7 +19,7 @@ enum { struct meson_sm_firmware; int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index, - u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); + s32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer, unsigned int b_size, unsigned int cmd_index, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h new file mode 100644 index 000000000000..5c28298a98be --- /dev/null +++ b/include/linux/firmware/qcom/qcom_qseecom.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Driver 
for Qualcomm Secure Execution Environment (SEE) interface (QSEECOM). + * Responsible for setting up and managing QSEECOM client devices. + * + * Copyright (C) 2023 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef __QCOM_QSEECOM_H +#define __QCOM_QSEECOM_H + +#include <linux/auxiliary_bus.h> +#include <linux/types.h> + +#include <linux/firmware/qcom/qcom_scm.h> + +/** + * struct qseecom_client - QSEECOM client device. + * @aux_dev: Underlying auxiliary device. + * @app_id: ID of the loaded application. + */ +struct qseecom_client { + struct auxiliary_device aux_dev; + u32 app_id; +}; + +/** + * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app. + * @client: The QSEECOM client associated with the target app. + * @req: Request buffer sent to the app (must be DMA-mappable). + * @req_size: Size of the request buffer. + * @rsp: Response buffer, written to by the app (must be DMA-mappable). + * @rsp_size: Size of the response buffer. + * + * Sends a request to the QSEE app associated with the given client and reads + * back its response. The caller must provide two DMA memory regions, one for + * the request and one for the response, and fill out the @req region with the + * respective (app-specific) request data. The QSEE app reads this and returns + * its response in the @rsp region. + * + * Note: This is a convenience wrapper around qcom_scm_qseecom_app_send(). + * Clients should prefer to use this wrapper. + * + * Return: Zero on success, nonzero on failure. + */ +static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size, + void *rsp, size_t rsp_size) +{ + return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size); +} + +#endif /* __QCOM_QSEECOM_H */ diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h index 0c091a3f6d49..ccaf28846054 100644 --- a/include/linux/firmware/qcom/qcom_scm.h +++ b/include/linux/firmware/qcom/qcom_scm.h @@ -59,12 +59,12 @@ enum qcom_scm_ice_cipher { #define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE) #define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC) -extern bool qcom_scm_is_available(void); +bool qcom_scm_is_available(void); -extern int qcom_scm_set_cold_boot_addr(void *entry); -extern int qcom_scm_set_warm_boot_addr(void *entry); -extern void qcom_scm_cpu_power_down(u32 flags); -extern int qcom_scm_set_remote_state(u32 state, u32 id); +int qcom_scm_set_cold_boot_addr(void *entry); +int qcom_scm_set_warm_boot_addr(void *entry); +void qcom_scm_cpu_power_down(u32 flags); +int qcom_scm_set_remote_state(u32 state, u32 id); struct qcom_scm_pas_metadata { void *ptr; @@ -72,54 +72,69 @@ struct qcom_scm_pas_metadata { ssize_t size; }; -extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, - size_t size, - struct qcom_scm_pas_metadata *ctx); -extern void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx); -extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, - phys_addr_t size); -extern int qcom_scm_pas_auth_and_reset(u32 peripheral); -extern int qcom_scm_pas_shutdown(u32 peripheral); -extern bool qcom_scm_pas_supported(u32 peripheral); - -extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); -extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); - -extern bool qcom_scm_restore_sec_cfg_available(void); -extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); -extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); -extern
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); -extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); -extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, - u32 cp_nonpixel_start, - u32 cp_nonpixel_size); -extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, - u64 *src, - const struct qcom_scm_vmperm *newvm, - unsigned int dest_cnt); - -extern bool qcom_scm_ocmem_lock_available(void); -extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, - u32 size, u32 mode); -extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, - u32 size); - -extern bool qcom_scm_ice_available(void); -extern int qcom_scm_ice_invalidate_key(u32 index); -extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, - enum qcom_scm_ice_cipher cipher, - u32 data_unit_size); - -extern bool qcom_scm_hdcp_available(void); -extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, - u32 *resp); - -extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt); -extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); - -extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, - u64 limit_node, u32 node_id, u64 version); -extern int qcom_scm_lmh_profile_change(u32 profile_id); -extern bool qcom_scm_lmh_dcvsh_available(void); +int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, + struct qcom_scm_pas_metadata *ctx); +void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx); +int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size); +int qcom_scm_pas_auth_and_reset(u32 peripheral); +int qcom_scm_pas_shutdown(u32 peripheral); +bool qcom_scm_pas_supported(u32 peripheral); + +int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); +int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); + +bool qcom_scm_restore_sec_cfg_available(void); +int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); +int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); +int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); +int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); +int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, + u32 cp_nonpixel_start, u32 cp_nonpixel_size); +int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, u64 *src, + const struct qcom_scm_vmperm *newvm, + unsigned int dest_cnt); + +bool qcom_scm_ocmem_lock_available(void); +int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, + u32 mode); +int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size); + +bool qcom_scm_ice_available(void); +int qcom_scm_ice_invalidate_key(u32 index); +int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, u32 data_unit_size); + +bool qcom_scm_hdcp_available(void); +int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); + +int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt); +int qcom_scm_qsmmu500_wait_safe_toggle(bool en); + +int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, + u64 limit_node, u32 node_id, u64 version); +int qcom_scm_lmh_profile_change(u32 profile_id); +bool qcom_scm_lmh_dcvsh_available(void); + +#ifdef CONFIG_QCOM_QSEECOM + +int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id); +int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp, + size_t rsp_size); + +#else /* 
CONFIG_QCOM_QSEECOM */ + +static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) +{ + return -EINVAL; +} + +static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req, + size_t req_size, void *rsp, + size_t rsp_size) +{ + return -EINVAL; +} + +#endif /* CONFIG_QCOM_QSEECOM */ #endif diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index e8b12ec8b060..9a7e52739251 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -32,6 +32,7 @@ #define PM_SIP_SVC 0xC2000000 /* PM API versions */ +#define PM_API_VERSION_1 1 #define PM_API_VERSION_2 2 #define PM_PINCTRL_PARAM_SET_VERSION 2 @@ -47,6 +48,9 @@ #define FAMILY_CODE_MASK GENMASK(27, 21) #define SUB_FAMILY_CODE_MASK GENMASK(20, 19) +#define API_ID_MASK GENMASK(7, 0) +#define MODULE_ID_MASK GENMASK(11, 8) + /* ATF only commands */ #define TF_A_PM_REGISTER_SGI 0xa04 #define PM_GET_TRUSTZONE_VERSION 0xa03 @@ -91,15 +95,41 @@ /* * Node IDs for the Error Events. */ -#define EVENT_ERROR_PMC_ERR1 (0x28100000U) -#define EVENT_ERROR_PMC_ERR2 (0x28104000U) -#define EVENT_ERROR_PSM_ERR1 (0x28108000U) -#define EVENT_ERROR_PSM_ERR2 (0x2810C000U) +#define VERSAL_EVENT_ERROR_PMC_ERR1 (0x28100000U) +#define VERSAL_EVENT_ERROR_PMC_ERR2 (0x28104000U) +#define VERSAL_EVENT_ERROR_PSM_ERR1 (0x28108000U) +#define VERSAL_EVENT_ERROR_PSM_ERR2 (0x2810C000U) + +#define VERSAL_NET_EVENT_ERROR_PMC_ERR1 (0x28100000U) +#define VERSAL_NET_EVENT_ERROR_PMC_ERR2 (0x28104000U) +#define VERSAL_NET_EVENT_ERROR_PMC_ERR3 (0x28108000U) +#define VERSAL_NET_EVENT_ERROR_PSM_ERR1 (0x2810C000U) +#define VERSAL_NET_EVENT_ERROR_PSM_ERR2 (0x28110000U) +#define VERSAL_NET_EVENT_ERROR_PSM_ERR3 (0x28114000U) +#define VERSAL_NET_EVENT_ERROR_PSM_ERR4 (0x28118000U) /* ZynqMP SD tap delay tuning */ #define SD_ITAPDLY 0xFF180314 #define SD_OTAPDLYSEL 0xFF180318 +/** + * XPM_EVENT_ERROR_MASK_DDRMC_CR: Error event mask for DDRMC MC Correctable ECC Error. + */ +#define XPM_EVENT_ERROR_MASK_DDRMC_CR BIT(18) + +/** + * XPM_EVENT_ERROR_MASK_DDRMC_NCR: Error event mask for DDRMC MC Non-Correctable ECC Error. 
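For the QSEECOM wrappers above: when CONFIG_QCOM_QSEECOM is disabled the stubs return -EINVAL, so callers need no #ifdef. A hypothetical client call; the request/response layouts and command value are invented, and the buffers come from the heap because they must be DMA-mappable:

static int foo_query_app(struct qseecom_client *client)
{
	struct foo_req { u32 cmd; } *req;
	struct foo_rsp { u32 status; } *rsp;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (!req || !rsp) {
		ret = -ENOMEM;
		goto out;
	}

	req->cmd = 0x1;		/* invented app-specific command */
	ret = qcom_qseecom_app_send(client, req, sizeof(*req),
				    rsp, sizeof(*rsp));
	if (!ret && rsp->status)
		ret = -EIO;
out:
	kfree(rsp);
	kfree(req);
	return ret;
}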
+ */ +#define XPM_EVENT_ERROR_MASK_DDRMC_NCR BIT(19) +#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13) +#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12) + +enum pm_module_id { + PM_MODULE_ID = 0x0, + XSEM_MODULE_ID = 0x3, + TF_A_MODULE_ID = 0xa, +}; + enum pm_api_cb_id { PM_INIT_SUSPEND_CB = 30, PM_ACKNOWLEDGE_CB = 31, @@ -107,6 +137,7 @@ enum pm_api_cb_id { }; enum pm_api_id { + PM_API_FEATURES = 0, PM_GET_API_VERSION = 1, PM_REGISTER_NOTIFIER = 5, PM_FORCE_POWERDOWN = 8, @@ -126,7 +157,6 @@ enum pm_api_id { PM_SECURE_SHA = 26, PM_PINCTRL_REQUEST = 28, PM_PINCTRL_RELEASE = 29, - PM_PINCTRL_GET_FUNCTION = 30, PM_PINCTRL_SET_FUNCTION = 31, PM_PINCTRL_CONFIG_PARAM_GET = 32, PM_PINCTRL_CONFIG_PARAM_SET = 33, @@ -137,8 +167,6 @@ enum pm_api_id { PM_CLOCK_GETSTATE = 38, PM_CLOCK_SETDIVIDER = 39, PM_CLOCK_GETDIVIDER = 40, - PM_CLOCK_SETRATE = 41, - PM_CLOCK_GETRATE = 42, PM_CLOCK_SETPARENT = 43, PM_CLOCK_GETPARENT = 44, PM_FPGA_READ = 46, @@ -149,7 +177,9 @@ enum pm_api_id { /* PMU-FW return status codes */ enum pm_ret_status { XST_PM_SUCCESS = 0, + XST_PM_INVALID_VERSION = 4, XST_PM_NO_FEATURE = 19, + XST_PM_INVALID_CRC = 301, XST_PM_INTERNAL = 2000, XST_PM_CONFLICT = 2001, XST_PM_NO_ACCESS = 2002, @@ -497,20 +527,18 @@ struct zynqmp_pm_query_data { u32 arg3; }; -int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, - u32 arg2, u32 arg3, u32 *ret_payload); +int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...); #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) int zynqmp_pm_get_api_version(u32 *version); int zynqmp_pm_get_chipid(u32 *idcode, u32 *version); +int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily); int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out); int zynqmp_pm_clock_enable(u32 clock_id); int zynqmp_pm_clock_disable(u32 clock_id); int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state); int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider); int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider); -int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate); -int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate); int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id); int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id); int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode); @@ -547,7 +575,6 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype); int zynqmp_pm_set_boot_health_status(u32 value); int zynqmp_pm_pinctrl_request(const u32 pin); int zynqmp_pm_pinctrl_release(const u32 pin); -int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id); int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id); int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, u32 *value); @@ -584,6 +611,11 @@ static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version) return -ENODEV; } +static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) +{ + return -ENODEV; +} + static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out) { @@ -615,16 +647,6 @@ static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider) return -ENODEV; } -static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate) -{ - return -ENODEV; -} - -static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate) -{ - return -ENODEV; -} - static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id) { return -ENODEV; @@ -794,11 +816,6 @@ static inline int zynqmp_pm_pinctrl_release(const u32 pin) return -ENODEV; } -static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id) -{ - 
return -ENODEV; -} - static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id) { return -ENODEV; diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index da51a83b2829..89a6888f2f9e 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -93,13 +93,9 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) #if __has_builtin(__builtin_dynamic_object_size) #define POS __pass_dynamic_object_size(1) #define POS0 __pass_dynamic_object_size(0) -#define __struct_size(p) __builtin_dynamic_object_size(p, 0) -#define __member_size(p) __builtin_dynamic_object_size(p, 1) #else #define POS __pass_object_size(1) #define POS0 __pass_object_size(0) -#define __struct_size(p) __builtin_object_size(p, 0) -#define __member_size(p) __builtin_object_size(p, 1) #endif #define __compiletime_lessthan(bounds, length) ( \ @@ -218,51 +214,6 @@ __kernel_size_t __fortify_strlen(const char * const POS p) return ret; } -/* Defined after fortified strlen() to reuse it. */ -extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); -/** - * strlcpy - Copy a string into another string buffer - * - * @p: pointer to destination of copy - * @q: pointer to NUL-terminated source string to copy - * @size: maximum number of bytes to write at @p - * - * If strlen(@q) >= @size, the copy of @q will be truncated at - * @size - 1 bytes. @p will always be NUL-terminated. - * - * Do not use this function. While FORTIFY_SOURCE tries to avoid - * over-reads when calculating strlen(@q), it is still possible. - * Prefer strscpy(), though note its different return values for - * detecting truncation. - * - * Returns total number of bytes written to @p, including terminating NUL. - * - */ -__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size) -{ - const size_t p_size = __member_size(p); - const size_t q_size = __member_size(q); - size_t q_len; /* Full count of source string length. */ - size_t len; /* Count of characters going into destination. */ - - if (p_size == SIZE_MAX && q_size == SIZE_MAX) - return __real_strlcpy(p, q, size); - q_len = strlen(q); - len = (q_len >= size) ? size - 1 : q_len; - if (__builtin_constant_p(size) && __builtin_constant_p(q_len) && size) { - /* Write size is always larger than destination. */ - if (len >= p_size) - __write_overflow(); - } - if (size) { - if (len >= p_size) - fortify_panic(__func__); - __underlying_memcpy(p, q, len); - p[len] = '\0'; - } - return q_len; -} - /* Defined after fortified strnlen() to reuse it. */ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); /** @@ -276,12 +227,6 @@ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); * @p buffer. The behavior is undefined if the string buffers overlap. The * destination @p buffer is always NUL terminated, unless it's zero-sized. * - * Preferred to strlcpy() since the API doesn't require reading memory - * from the source @q string beyond the specified @size bytes, and since - * the return value is easier to error-check than strlcpy()'s. - * In addition, the implementation is robust to the string changing out - * from underneath it, unlike the current strlcpy() implementation. - * * Preferred to strncpy() since it always returns a valid string, and * doesn't unnecessarily force the tail of the destination buffer to be * zero padded. If padding is desired please use strscpy_pad(). 
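With the fortified strlcpy() removed, remaining users convert to strscpy(); note the different truncation signalling. A sketch:

static void sketch_copy_name(char *dst, size_t len, const char *src)
{
	/* Before: strlcpy() returned strlen(src), so truncation was
	 * detected with 'strlcpy(dst, src, len) >= len'. */
	if (strscpy(dst, src, len) < 0)		/* -E2BIG on truncation */
		pr_warn("name truncated\n");
}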
@@ -643,7 +588,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, __q_size_field, #op), \ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \ __fortify_size, \ - "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \ + "field \"" #p "\" at " FILE_LINE, \ __p_size_field); \ __underlying_##op(p, q, __fortify_size); \ }) diff --git a/include/linux/framer/framer-provider.h b/include/linux/framer/framer-provider.h new file mode 100644 index 000000000000..782cd5fc83d5 --- /dev/null +++ b/include/linux/framer/framer-provider.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Generic framer provider header file + * + * Copyright 2023 CS GROUP France + * + * Author: Herve Codina <herve.codina@bootlin.com> + */ + +#ifndef __DRIVERS_PROVIDER_FRAMER_H +#define __DRIVERS_PROVIDER_FRAMER_H + +#include <linux/export.h> +#include <linux/framer/framer.h> +#include <linux/types.h> + +#define FRAMER_FLAG_POLL_STATUS BIT(0) + +/** + * struct framer_ops - set of function pointers for performing framer operations + * @init: operation to be performed for initializing the framer + * @exit: operation to be performed while exiting + * @power_on: powering on the framer + * @power_off: powering off the framer + * @flags: OR-ed flags (FRAMER_FLAG_*) to ask for core functionality + * - @FRAMER_FLAG_POLL_STATUS: + * Ask the core to perform a polling to get the framer status and + * notify consumers on change. + * The framer should call @framer_notify_status_change() when it + * detects a status change. This is usually done using interrupts. + * If the framer cannot detect this change, it can ask the core for + * a status polling. The core will call @get_status() periodically + * and, on change detected, it will notify the consumer. + * @owner: the module owner containing the ops + */ +struct framer_ops { + int (*init)(struct framer *framer); + void (*exit)(struct framer *framer); + int (*power_on)(struct framer *framer); + int (*power_off)(struct framer *framer); + + /** + * @get_status: + * + * Optional. + * + * Used to get the framer status. framer_init() must have + * been called on the framer. + * + * Returns: 0 if successful, a negative error code otherwise + */ + int (*get_status)(struct framer *framer, struct framer_status *status); + + /** + * @set_config: + * + * Optional. + * + * Used to set the framer configuration. framer_init() must have + * been called on the framer. + * + * Returns: 0 if successful, a negative error code otherwise + */ + int (*set_config)(struct framer *framer, const struct framer_config *config); + + /** + * @get_config: + * + * Optional. + * + * Used to get the framer configuration. framer_init() must have + * been called on the framer.
+ * + * Returns: 0 if successful, a negative error code otherwise + */ + int (*get_config)(struct framer *framer, struct framer_config *config); + + u32 flags; + struct module *owner; +}; + +/** + * struct framer_provider - represents the framer provider + * @dev: framer provider device + * @owner: the module owner having of_xlate + * @list: to maintain a linked list of framer providers + * @of_xlate: function pointer to obtain framer instance from framer pointer + */ +struct framer_provider { + struct device *dev; + struct module *owner; + struct list_head list; + struct framer * (*of_xlate)(struct device *dev, + struct of_phandle_args *args); +}; + +static inline void framer_set_drvdata(struct framer *framer, void *data) +{ + dev_set_drvdata(&framer->dev, data); +} + +static inline void *framer_get_drvdata(struct framer *framer) +{ + return dev_get_drvdata(&framer->dev); +} + +#if IS_ENABLED(CONFIG_GENERIC_FRAMER) + +/* Create and destroy a framer */ +struct framer *framer_create(struct device *dev, struct device_node *node, + const struct framer_ops *ops); +void framer_destroy(struct framer *framer); + +/* devm version */ +struct framer *devm_framer_create(struct device *dev, struct device_node *node, + const struct framer_ops *ops); + +struct framer *framer_provider_simple_of_xlate(struct device *dev, + struct of_phandle_args *args); + +struct framer_provider * +__framer_provider_of_register(struct device *dev, struct module *owner, + struct framer *(*of_xlate)(struct device *dev, + struct of_phandle_args *args)); + +void framer_provider_of_unregister(struct framer_provider *framer_provider); + +struct framer_provider * +__devm_framer_provider_of_register(struct device *dev, struct module *owner, + struct framer *(*of_xlate)(struct device *dev, + struct of_phandle_args *args)); + +void framer_notify_status_change(struct framer *framer); + +#else /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */ + +static inline struct framer *framer_create(struct device *dev, struct device_node *node, + const struct framer_ops *ops) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void framer_destroy(struct framer *framer) +{ +} + +/* devm version */ +static inline struct framer *devm_framer_create(struct device *dev, struct device_node *node, + const struct framer_ops *ops) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct framer *framer_provider_simple_of_xlate(struct device *dev, + struct of_phandle_args *args) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct framer_provider * +__framer_provider_of_register(struct device *dev, struct module *owner, + struct framer *(*of_xlate)(struct device *dev, + struct of_phandle_args *args)) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void framer_provider_of_unregister(struct framer_provider *framer_provider) +{ +} + +static inline struct framer_provider * +__devm_framer_provider_of_register(struct device *dev, struct module *owner, + struct framer *(*of_xlate)(struct device *dev, + struct of_phandle_args *args)) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void framer_notify_status_change(struct framer *framer) +{ +} + +#endif /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */ + +#define framer_provider_of_register(dev, xlate) \ + __framer_provider_of_register((dev), THIS_MODULE, (xlate)) + +#define devm_framer_provider_of_register(dev, xlate) \ + __devm_framer_provider_of_register((dev), THIS_MODULE, (xlate)) + +#endif /* __DRIVERS_PROVIDER_FRAMER_H */ diff --git a/include/linux/framer/framer.h
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h new file mode 100644 index 000000000000..9a9b88962c29 --- /dev/null +++ b/include/linux/framer/framer.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Generic framer header file + * + * Copyright 2023 CS GROUP France + * + * Author: Herve Codina <herve.codina@bootlin.com> + */ + +#ifndef __DRIVERS_FRAMER_H +#define __DRIVERS_FRAMER_H + +#include <linux/err.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/device.h> +#include <linux/workqueue.h> + +/** + * enum framer_iface - Framer interface + * @FRAMER_IFACE_E1: E1 interface + * @FRAMER_IFACE_T1: T1 interface + */ +enum framer_iface { + FRAMER_IFACE_E1, + FRAMER_IFACE_T1, +}; + +/** + * enum framer_clock_type - Framer clock type + * @FRAMER_CLOCK_EXT: External clock + * @FRAMER_CLOCK_INT: Internal clock + */ +enum framer_clock_type { + FRAMER_CLOCK_EXT, + FRAMER_CLOCK_INT, +}; + +/** + * struct framer_config - Framer configuration + * @iface: Framer line interface + * @clock_type: Framer clock type + * @line_clock_rate: Framer line clock rate + */ +struct framer_config { + enum framer_iface iface; + enum framer_clock_type clock_type; + unsigned long line_clock_rate; +}; + +/** + * struct framer_status - Framer status + * @link_is_on: Framer link state: true if the link is on, false if it is off. + */ +struct framer_status { + bool link_is_on; +}; + +/** + * enum framer_event - Event available for notification + * @FRAMER_EVENT_STATUS: Event notified on framer_status changes + */ +enum framer_event { + FRAMER_EVENT_STATUS, +}; + +/** + * struct framer - represents the framer device + * @dev: framer device + * @id: id of the framer device + * @ops: function pointers for performing framer operations + * @mutex: mutex to protect framer_ops + * @init_count: used to protect when the framer is used by multiple consumers + * @power_count: used to protect when the framer is used by multiple consumers + * @pwr: power regulator associated with the framer + * @notify_status_work: work structure used for status notifications + * @notifier_list: notifier list used for notifications + * @polling_work: delayed work structure used for the polling task + * @prev_status: previous read status used by the polling task to detect changes + */ +struct framer { + struct device dev; + int id; + const struct framer_ops *ops; + struct mutex mutex; /* Protect framer */ + int init_count; + int power_count; + struct regulator *pwr; + struct work_struct notify_status_work; + struct blocking_notifier_head notifier_list; + struct delayed_work polling_work; + struct framer_status prev_status; +}; + +#if IS_ENABLED(CONFIG_GENERIC_FRAMER) +int framer_pm_runtime_get(struct framer *framer); +int framer_pm_runtime_get_sync(struct framer *framer); +int framer_pm_runtime_put(struct framer *framer); +int framer_pm_runtime_put_sync(struct framer *framer); +int framer_init(struct framer *framer); +int framer_exit(struct framer *framer); +int framer_power_on(struct framer *framer); +int framer_power_off(struct framer *framer); +int framer_get_status(struct framer *framer, struct framer_status *status); +int framer_get_config(struct framer *framer, struct framer_config *config); +int framer_set_config(struct framer *framer, const struct framer_config *config); +int framer_notifier_register(struct framer *framer, struct notifier_block *nb); +int framer_notifier_unregister(struct framer *framer, struct notifier_block *nb); + +struct framer *framer_get(struct device *dev, const char *con_id);
+void framer_put(struct device *dev, struct framer *framer); + +struct framer *devm_framer_get(struct device *dev, const char *con_id); +struct framer *devm_framer_optional_get(struct device *dev, const char *con_id); +#else +static inline int framer_pm_runtime_get(struct framer *framer) +{ + return -ENOSYS; +} + +static inline int framer_pm_runtime_get_sync(struct framer *framer) +{ + return -ENOSYS; +} + +static inline int framer_pm_runtime_put(struct framer *framer) +{ + return -ENOSYS; +} + +static inline int framer_pm_runtime_put_sync(struct framer *framer) +{ + return -ENOSYS; +} + +static inline int framer_init(struct framer *framer) +{ + return -ENOSYS; +} + +static inline int framer_exit(struct framer *framer) +{ + return -ENOSYS; +} + +static inline int framer_power_on(struct framer *framer) +{ + return -ENOSYS; +} + +static inline int framer_power_off(struct framer *framer) +{ + return -ENOSYS; +} + +static inline int framer_get_status(struct framer *framer, struct framer_status *status) +{ + return -ENOSYS; +} + +static inline int framer_get_config(struct framer *framer, struct framer_config *config) +{ + return -ENOSYS; +} + +static inline int framer_set_config(struct framer *framer, const struct framer_config *config) +{ + return -ENOSYS; +} + +static inline int framer_notifier_register(struct framer *framer, + struct notifier_block *nb) +{ + return -ENOSYS; +} + +static inline int framer_notifier_unregister(struct framer *framer, + struct notifier_block *nb) +{ + return -ENOSYS; +} + +static inline struct framer *framer_get(struct device *dev, const char *con_id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void framer_put(struct device *dev, struct framer *framer) +{ +} + +static inline struct framer *devm_framer_get(struct device *dev, const char *con_id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct framer *devm_framer_optional_get(struct device *dev, const char *con_id) +{ + return NULL; +} + +#endif + +#endif /* __DRIVERS_FRAMER_H */ diff --git a/include/linux/framer/pef2256.h b/include/linux/framer/pef2256.h new file mode 100644 index 000000000000..71d80af58c40 --- /dev/null +++ b/include/linux/framer/pef2256.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * PEF2256 consumer API + * + * Copyright 2023 CS GROUP France + * + * Author: Herve Codina <herve.codina@bootlin.com> + */ +#ifndef __PEF2256_H__ +#define __PEF2256_H__ + +#include <linux/types.h> + +struct pef2256; +struct regmap; + +/* Retrieve the PEF2256 regmap */ +struct regmap *pef2256_get_regmap(struct pef2256 *pef2256); + +/* PEF2256 hardware versions */ +enum pef2256_version { + PEF2256_VERSION_UNKNOWN, + PEF2256_VERSION_1_2, + PEF2256_VERSION_2_1, + PEF2256_VERSION_2_2, +}; + +/* Get the PEF2256 hardware version */ +enum pef2256_version pef2256_get_version(struct pef2256 *pef2256); + +#endif /* __PEF2256_H__ */ diff --git a/include/linux/freelist.h b/include/linux/freelist.h deleted file mode 100644 index fc1842b96469..000000000000 --- a/include/linux/freelist.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ -#ifndef FREELIST_H -#define FREELIST_H - -#include <linux/atomic.h> - -/* - * Copyright: cameron@moodycamel.com - * - * A simple CAS-based lock-free free list. Not the fastest thing in the world - * under heavy contention, but simple and correct (assuming nodes are never - * freed until after the free list is destroyed), and fairly speedy under low - * contention.
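For context on the file being deleted here: the interface embedded its node in a containing object and recovered it with container_of(). The my_* names below are made up; this is only an illustration of that pattern, not code from the tree.

/* Illustrative only: typical usage of the interface removed here. */
struct my_node {
	struct freelist_node	entry;	/* refs must start out at zero */
	int			payload;
};

static struct freelist_head my_pool;	/* zero-initialized: empty list */

static void my_node_release(struct my_node *n)
{
	freelist_add(&n->entry, &my_pool);
}

static struct my_node *my_node_acquire(void)
{
	struct freelist_node *fn = freelist_try_get(&my_pool);

	return fn ? container_of(fn, struct my_node, entry) : NULL;
}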
- * - * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists - */ - -struct freelist_node { - atomic_t refs; - struct freelist_node *next; -}; - -struct freelist_head { - struct freelist_node *head; -}; - -#define REFS_ON_FREELIST 0x80000000 -#define REFS_MASK 0x7FFFFFFF - -static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list) -{ - /* - * Since the refcount is zero, and nobody can increase it once it's - * zero (except us, and we run only one copy of this method per node at - * a time, i.e. the single thread case), then we know we can safely - * change the next pointer of the node; however, once the refcount is - * back above zero, then other threads could increase it (happens under - * heavy contention, when the refcount goes to zero in between a load - * and a refcount increment of a node in try_get, then back up to - * something non-zero, then the refcount increment is done by the other - * thread) -- so if the CAS to add the node to the actual list fails, - * decrese the refcount and leave the add operation to the next thread - * who puts the refcount back to zero (which could be us, hence the - * loop). - */ - struct freelist_node *head = READ_ONCE(list->head); - - for (;;) { - WRITE_ONCE(node->next, head); - atomic_set_release(&node->refs, 1); - - if (!try_cmpxchg_release(&list->head, &head, node)) { - /* - * Hmm, the add failed, but we can only try again when - * the refcount goes back to zero. - */ - if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1) - continue; - } - return; - } -} - -static inline void freelist_add(struct freelist_node *node, struct freelist_head *list) -{ - /* - * We know that the should-be-on-freelist bit is 0 at this point, so - * it's safe to set it using a fetch_add. - */ - if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) { - /* - * Oh look! We were the last ones referencing this node, and we - * know we want to add it to the free list, so let's do it! - */ - __freelist_add(node, list); - } -} - -static inline struct freelist_node *freelist_try_get(struct freelist_head *list) -{ - struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head); - unsigned int refs; - - while (head) { - prev = head; - refs = atomic_read(&head->refs); - if ((refs & REFS_MASK) == 0 || - !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) { - head = smp_load_acquire(&list->head); - continue; - } - - /* - * Good, reference count has been incremented (it wasn't at - * zero), which means we can read the next and not worry about - * it changing between now and the time we do the CAS. - */ - next = READ_ONCE(head->next); - if (try_cmpxchg_acquire(&list->head, &head, next)) { - /* - * Yay, got the node. This means it was on the list, - * which means should-be-on-freelist must be false no - * matter the refcount (because nobody else knows it's - * been taken off yet, it can't have been put back on). - */ - WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST); - - /* - * Decrease refcount twice, once for our ref, and once - * for the list's ref. - */ - atomic_fetch_add(-2, &head->refs); - - return head; - } - - /* - * OK, the head must have changed on us, but we still need to decrement - * the refcount we increased. 
- */ - refs = atomic_fetch_add(-1, &prev->refs); - if (refs == REFS_ON_FREELIST + 1) - __freelist_add(prev, list); - } - - return NULL; -} - -#endif /* FREELIST_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 4aeb3fa11927..ed5966a70495 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -67,7 +67,7 @@ struct swap_info_struct; struct seq_file; struct workqueue_struct; struct iov_iter; -struct fscrypt_info; +struct fscrypt_inode_info; struct fscrypt_operations; struct fsverity_info; struct fsverity_operations; @@ -434,7 +434,7 @@ struct address_space_operations { bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb); - int (*error_remove_page)(struct address_space *, struct page *); + int (*error_remove_folio)(struct address_space *, struct folio *); /* swapfile support */ int (*swap_activate)(struct swap_info_struct *sis, struct file *file, @@ -454,7 +454,7 @@ extern const struct address_space_operations empty_aops; * It is also used to block modification of page cache contents through * memory mappings. * @gfp_mask: Memory allocation flags to use for allocating pages. - * @i_mmap_writable: Number of VM_SHARED mappings. + * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings. * @nr_thps: Number of THPs in the pagecache (non-shmem only). * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. @@ -463,9 +463,9 @@ extern const struct address_space_operations empty_aops; * @a_ops: Methods. * @flags: Error bits and flags (AS_*). * @wb_err: The most recent error which has occurred. - * @private_lock: For use by the owner of the address_space. - * @private_list: For use by the owner of the address_space. - * @private_data: For use by the owner of the address_space. + * @i_private_lock: For use by the owner of the address_space. + * @i_private_list: For use by the owner of the address_space. + * @i_private_data: For use by the owner of the address_space. */ struct address_space { struct inode *host; @@ -484,9 +484,9 @@ struct address_space { unsigned long flags; struct rw_semaphore i_mmap_rwsem; errseq_t wb_err; - spinlock_t private_lock; - struct list_head private_list; - void *private_data; + spinlock_t i_private_lock; + struct list_head i_private_list; + void * i_private_data; } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but @@ -557,7 +557,7 @@ static inline int mapping_mapped(struct address_space *mapping) /* * Might pages of this file have been modified in userspace? - * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap + * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap * marks vma as VM_SHARED if it is shared, and the file was opened for * writing i.e. vma may be mprotected writable even if now readonly. * @@ -671,8 +671,8 @@ struct inode { }; dev_t i_rdev; loff_t i_size; - struct timespec64 i_atime; - struct timespec64 i_mtime; + struct timespec64 __i_atime; + struct timespec64 __i_mtime; struct timespec64 __i_ctime; /* use inode_*_ctime accessors! 
*/ spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; @@ -738,7 +738,7 @@ struct inode { #endif #ifdef CONFIG_FS_ENCRYPTION - struct fscrypt_info *i_crypt_info; + struct fscrypt_inode_info *i_crypt_info; #endif #ifdef CONFIG_FS_VERITY @@ -991,8 +991,10 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) */ struct file { union { + /* fput() uses task work when closing and freeing file (default). */ + struct callback_head f_task_work; + /* fput() must use workqueue (most kernel threads). */ struct llist_node f_llist; - struct rcu_head f_rcuhead; unsigned int f_iocb_flags; }; @@ -1042,7 +1044,10 @@ static inline struct file *get_file(struct file *f) atomic_long_inc(&f->f_count); return f; } -#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count) + +struct file *get_file_rcu(struct file __rcu **f); +struct file *get_file_active(struct file **f); + #define file_count(x) atomic_long_read(&(x)->f_count) #define MAX_NON_LFS ((1UL<<31) - 1) @@ -1119,7 +1124,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_NOATIME BIT(10) /* Do not update access times. */ #define SB_NODIRATIME BIT(11) /* Do not update directory access times */ #define SB_SILENT BIT(15) -#define SB_POSIXACL BIT(16) /* VFS does not apply the umask */ +#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */ #define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */ #define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */ #define SB_I_VERSION BIT(23) /* Update inode I_version field */ @@ -1161,11 +1166,13 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 #define SB_I_UNTRUSTED_MOUNTER 0x00000040 +#define SB_I_EVM_UNSUPPORTED 0x00000080 #define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ #define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ #define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */ #define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */ +#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */ /* Possible states of 'frozen' field */ enum { @@ -1181,7 +1188,8 @@ enum { struct sb_writers { unsigned short frozen; /* Is sb frozen? */ - unsigned short freeze_holders; /* Who froze fs? */ + int freeze_kcount; /* How many kernel freeze requests? */ + int freeze_ucount; /* How many userspace freeze requests? 
*/ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; }; @@ -1206,7 +1214,7 @@ struct super_block { #ifdef CONFIG_SECURITY void *s_security; #endif - const struct xattr_handler **s_xattr; + const struct xattr_handler * const *s_xattr; #ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */ @@ -1221,6 +1229,7 @@ struct super_block { struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ struct block_device *s_bdev; + struct bdev_handle *s_bdev_handle; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; @@ -1265,7 +1274,7 @@ struct super_block { const struct dentry_operations *s_d_op; /* default d_op for dentries */ - struct shrinker s_shrink; /* per-sb shrinker handle */ + struct shrinker *s_shrink; /* per-sb shrinker handle */ /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; @@ -1508,56 +1517,84 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb, kgid_has_mapping(fs_userns, kgid); } -struct timespec64 current_mgtime(struct inode *inode); struct timespec64 current_time(struct inode *inode); struct timespec64 inode_set_ctime_current(struct inode *inode); -/* - * Multigrain timestamps - * - * Conditionally use fine-grained ctime and mtime timestamps when there - * are users actively observing them via getattr. The primary use-case - * for this is NFS clients that use the ctime to distinguish between - * different states of the file, and that are often fooled by multiple - * operations that occur in the same coarse-grained timer tick. - * - * The kernel always keeps normalized struct timespec64 values in the ctime, - * which means that only the first 30 bits of the value are used. Use the - * 31st bit of the ctime's tv_nsec field as a flag to indicate that the value - * has been queried since it was last updated. - */ -#define I_CTIME_QUERIED (1L<<30) +static inline time64_t inode_get_atime_sec(const struct inode *inode) +{ + return inode->__i_atime.tv_sec; +} -/** - * inode_get_ctime - fetch the current ctime from the inode - * @inode: inode from which to fetch ctime - * - * Grab the current ctime tv_nsec field from the inode, mask off the - * I_CTIME_QUERIED flag and return it. This is mostly intended for use by - * internal consumers of the ctime that aren't concerned with ensuring a - * fine-grained update on the next change (e.g. when preparing to store - * the value in the backing store for later retrieval). - * - * This is safe to call regardless of whether the underlying filesystem - * is using multigrain timestamps. 
- */ -static inline struct timespec64 inode_get_ctime(const struct inode *inode) +static inline long inode_get_atime_nsec(const struct inode *inode) { - struct timespec64 ctime; + return inode->__i_atime.tv_nsec; +} - ctime.tv_sec = inode->__i_ctime.tv_sec; - ctime.tv_nsec = inode->__i_ctime.tv_nsec & ~I_CTIME_QUERIED; +static inline struct timespec64 inode_get_atime(const struct inode *inode) +{ + return inode->__i_atime; +} - return ctime; +static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode, + struct timespec64 ts) +{ + inode->__i_atime = ts; + return ts; +} + +static inline struct timespec64 inode_set_atime(struct inode *inode, + time64_t sec, long nsec) +{ + struct timespec64 ts = { .tv_sec = sec, + .tv_nsec = nsec }; + return inode_set_atime_to_ts(inode, ts); +} + +static inline time64_t inode_get_mtime_sec(const struct inode *inode) +{ + return inode->__i_mtime.tv_sec; +} + +static inline long inode_get_mtime_nsec(const struct inode *inode) +{ + return inode->__i_mtime.tv_nsec; +} + +static inline struct timespec64 inode_get_mtime(const struct inode *inode) +{ + return inode->__i_mtime; +} + +static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode, + struct timespec64 ts) +{ + inode->__i_mtime = ts; + return ts; +} + +static inline struct timespec64 inode_set_mtime(struct inode *inode, + time64_t sec, long nsec) +{ + struct timespec64 ts = { .tv_sec = sec, + .tv_nsec = nsec }; + return inode_set_mtime_to_ts(inode, ts); +} + +static inline time64_t inode_get_ctime_sec(const struct inode *inode) +{ + return inode->__i_ctime.tv_sec; +} + +static inline long inode_get_ctime_nsec(const struct inode *inode) +{ + return inode->__i_ctime.tv_nsec; +} + +static inline struct timespec64 inode_get_ctime(const struct inode *inode) +{ + return inode->__i_ctime; } -/** - * inode_set_ctime_to_ts - set the ctime in the inode - * @inode: inode in which to set the ctime - * @ts: value to set in the ctime field - * - * Set the ctime in @inode to @ts - */ static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts) { @@ -1582,6 +1619,8 @@ static inline struct timespec64 inode_set_ctime(struct inode *inode, return inode_set_ctime_to_ts(inode, ts); } +struct timespec64 simple_inode_init_ts(struct inode *inode); + /* * Snapshotting support. */ @@ -1610,9 +1649,70 @@ static inline bool __sb_start_write_trylock(struct super_block *sb, int level) #define __sb_writers_release(sb, lev) \ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) +/** + * __sb_write_started - check if sb freeze level is held + * @sb: the super we write to + * @level: the freeze level + * + * * > 0 - sb freeze level is held + * * 0 - sb freeze level is not held + * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN + */ +static inline int __sb_write_started(const struct super_block *sb, int level) +{ + return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1); +} + +/** + * sb_write_started - check if SB_FREEZE_WRITE is held + * @sb: the super we write to + * + * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN. + */ static inline bool sb_write_started(const struct super_block *sb) { - return lockdep_is_held_type(sb->s_writers.rw_sem + SB_FREEZE_WRITE - 1, 1); + return __sb_write_started(sb, SB_FREEZE_WRITE); +} + +/** + * sb_write_not_started - check if SB_FREEZE_WRITE is not held + * @sb: the super we write to + * + * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN. 
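The atime/mtime accessors introduced above make the conversion away from direct i_atime/i_mtime access mechanical. As a hedged sketch (the helper name is invented), a filesystem that used to assign the fields directly now writes:

/* Hypothetical helper; shows the accessor style, not code from this patch. */
static void my_fs_touch(struct inode *inode)
{
	struct timespec64 now = current_time(inode);

	/* Was: inode->i_atime = inode->i_mtime = now; */
	inode_set_atime_to_ts(inode, now);
	inode_set_mtime_to_ts(inode, now);
	inode_set_ctime_current(inode);
}

The fs_stack.h hunk further down shows the same conversion applied in tree.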
+ */ +static inline bool sb_write_not_started(const struct super_block *sb) +{ + return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0; +} + +/** + * file_write_started - check if SB_FREEZE_WRITE is held + * @file: the file we write to + * + * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN. + * May be false positive with !S_ISREG, because file_start_write() has + * no effect on !S_ISREG. + */ +static inline bool file_write_started(const struct file *file) +{ + if (!S_ISREG(file_inode(file)->i_mode)) + return true; + return sb_write_started(file_inode(file)->i_sb); +} + +/** + * file_write_not_started - check if SB_FREEZE_WRITE is not held + * @file: the file we write to + * + * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN. + * May be false positive with !S_ISREG, because file_start_write() has + * no effect on !S_ISREG. + */ +static inline bool file_write_not_started(const struct file *file) +{ + if (!S_ISREG(file_inode(file)->i_mode)) + return true; + return sb_write_not_started(file_inode(file)->i_sb); } /** @@ -1994,9 +2094,6 @@ extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); -extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - size_t len, unsigned int flags); int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *len, unsigned int remap_flags, @@ -2016,9 +2113,24 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, struct file *dst_file, loff_t dst_pos, loff_t len, unsigned int remap_flags); +/** + * enum freeze_holder - holder of the freeze + * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem + * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem + * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed + * + * Indicate who the owner of the freeze or thaw request is and whether + * the freeze needs to be exclusive or can nest. + * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the + * same holder aren't allowed. It is however allowed to hold a single + * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at + * the same time. This is relied upon by some filesystems during online + * repair or similar. 
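Concretely, and assuming the freeze_super()/thaw_super() prototypes elsewhere in fs.h that take this holder argument, a kernel-internal user such as online repair could nest on top of a userspace freeze roughly like this:

/* Sketch: a kernel-held freeze that may nest with a userspace one. */
static int my_repair_freeze(struct super_block *sb)
{
	int ret;

	ret = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
	if (ret)
		return ret;

	/* ... inspect or repair the quiesced filesystem ... */

	return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
}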
+ */ enum freeze_holder { FREEZE_HOLDER_KERNEL = (1U << 0), FREEZE_HOLDER_USERSPACE = (1U << 1), + FREEZE_MAY_NEST = (1U << 2), }; struct super_operations { @@ -2110,7 +2222,12 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) + +#ifdef CONFIG_FS_POSIX_ACL #define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL) +#else +#define IS_POSIXACL(inode) 0 +#endif #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) @@ -2254,7 +2371,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, #define I_CREATING (1 << 15) #define I_DONTCACHE (1 << 16) #define I_SYNC_QUEUED (1 << 17) -#define I_PINNING_FSCACHE_WB (1 << 18) +#define I_PINNING_NETFS_WB (1 << 18) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) @@ -2334,7 +2451,6 @@ struct file_system_type { #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ #define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ -#define FS_MGTIME 64 /* FS uses multigrain timestamps */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; @@ -2358,17 +2474,6 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) -/** - * is_mgtime: is this inode using multigrain timestamps - * @inode: inode to test for multigrain timestamps - * - * Return true if the inode uses multigrain timestamps, false otherwise. - */ -static inline bool is_mgtime(const struct inode *inode) -{ - return inode->i_sb->s_type->fs_flags & FS_MGTIME; -} - extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2444,13 +2549,13 @@ struct audit_names; struct filename { const char *name; /* pointer to actual string */ const __user char *uptr; /* original userland pointer */ - int refcnt; + atomic_t refcnt; struct audit_names *aname; const char iname[]; }; static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); -static inline struct mnt_idmap *file_mnt_idmap(struct file *file) +static inline struct mnt_idmap *file_mnt_idmap(const struct file *file) { return mnt_idmap(file->f_path.mnt); } @@ -2489,26 +2594,31 @@ struct file *dentry_open(const struct path *path, int flags, const struct cred *creds); struct file *dentry_create(const struct path *path, int flags, umode_t mode, const struct cred *cred); -struct file *backing_file_open(const struct path *path, int flags, - const struct path *real_path, - const struct cred *cred); -struct path *backing_file_real_path(struct file *f); +struct path *backing_file_user_path(struct file *f); /* - * file_real_path - get the path corresponding to f_inode - * - * When opening a backing file for a stackable filesystem (e.g., - * overlayfs) f_path may be on the stackable filesystem and f_inode on - * the underlying filesystem. When the path associated with f_inode is - * needed, this helper should be used instead of accessing f_path - * directly. 
-*/ -static inline const struct path *file_real_path(struct file *f) + * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file + * stored in ->vm_file is a backing file whose f_inode is on the underlying + * filesystem. When the mapped file path and inode number are displayed to + * user (e.g. via /proc/<pid>/maps), these helpers should be used to get the + * path and inode number to display to the user, which is the path of the fd + * that user has requested to map and the inode number that would be returned + * by fstat() on that same fd. + */ +/* Get the path to display in /proc/<pid>/maps */ +static inline const struct path *file_user_path(struct file *f) { if (unlikely(f->f_mode & FMODE_BACKING)) - return backing_file_real_path(f); + return backing_file_user_path(f); return &f->f_path; } +/* Get the inode whose inode number to display in /proc/<pid>/maps */ +static inline const struct inode *file_user_inode(struct file *f) +{ + if (unlikely(f->f_mode & FMODE_BACKING)) + return d_inode(backing_file_user_path(f)->dentry); + return file_inode(f); +} static inline struct file *file_clone_open(struct file *file) { @@ -2963,8 +3073,6 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos, size_t len, unsigned int flags); extern ssize_t iter_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); -extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, - loff_t *opos, size_t len, unsigned int flags); extern void @@ -3054,7 +3162,6 @@ extern void page_put_link(void *); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); -void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode); void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *); void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); @@ -3094,7 +3201,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int); extern struct file_system_type *get_filesystem(struct file_system_type *fs); extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); -extern struct super_block *get_active_super(struct block_device *bdev); extern void drop_super(struct super_block *sb); extern void drop_super_exclusive(struct super_block *sb); extern void iterate_supers(void (*)(struct super_block *, void *), void *); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 96332db693d5..c13e99cbbf81 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -136,6 +136,8 @@ extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc); extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param); extern int vfs_parse_fs_string(struct fs_context *fc, const char *key, const char *value, size_t v_size); +int vfs_parse_monolithic_sep(struct fs_context *fc, void *data, + char *(*sep)(char **)); extern int generic_parse_monolithic(struct fs_context *fc, void *data); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h index 010d39d0dc1c..2b1f74b24070 100644 --- a/include/linux/fs_stack.h +++ b/include/linux/fs_stack.h @@ -16,14 +16,14 @@ extern void fsstack_copy_inode_size(struct inode 
*dst, struct inode *src); static inline void fsstack_copy_attr_atime(struct inode *dest, const struct inode *src) { - dest->i_atime = src->i_atime; + inode_set_atime_to_ts(dest, inode_get_atime(src)); } static inline void fsstack_copy_attr_times(struct inode *dest, const struct inode *src) { - dest->i_atime = src->i_atime; - dest->i_mtime = src->i_mtime; + inode_set_atime_to_ts(dest, inode_get_atime(src)); + inode_set_mtime_to_ts(dest, inode_get_mtime(src)); inode_set_ctime_to_ts(dest, inode_get_ctime(src)); } diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index a174cedf4d90..bdf7f3eddf0a 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -189,17 +189,20 @@ extern atomic_t fscache_n_write; extern atomic_t fscache_n_no_write_space; extern atomic_t fscache_n_no_create_space; extern atomic_t fscache_n_culled; +extern atomic_t fscache_n_dio_misfit; #define fscache_count_read() atomic_inc(&fscache_n_read) #define fscache_count_write() atomic_inc(&fscache_n_write) #define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space) #define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space) #define fscache_count_culled() atomic_inc(&fscache_n_culled) +#define fscache_count_dio_misfit() atomic_inc(&fscache_n_dio_misfit) #else #define fscache_count_read() do {} while(0) #define fscache_count_write() do {} while(0) #define fscache_count_no_write_space() do {} while(0) #define fscache_count_no_create_space() do {} while(0) #define fscache_count_culled() do {} while(0) +#define fscache_count_dio_misfit() do {} while(0) #endif #endif /* _LINUX_FSCACHE_CACHE_H */ diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 8e312c8323a8..6e8562cbcc43 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -437,9 +437,6 @@ const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_r * indicates the cache resources to which the operation state should be * attached; @cookie indicates the cache object that will be accessed. * - * This is intended to be called from the ->begin_cache_operation() netfs lib - * operation as implemented by the network filesystem. - * * @cres->inval_counter is set from @cookie->inval_counter for comparison at * the end of the operation. This allows invalidation during the operation to * be detected by the caller. @@ -629,48 +626,6 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie, } -#if __fscache_available -bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio, - struct fscache_cookie *cookie); -#else -#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \ - filemap_dirty_folio(MAPPING, FOLIO) -#endif - -/** - * fscache_unpin_writeback - Unpin writeback resources - * @wbc: The writeback control - * @cookie: The cookie referring to the cache object - * - * Unpin the writeback resources pinned by fscache_dirty_folio(). This is - * intended to be called by the netfs's ->write_inode() method. - */ -static inline void fscache_unpin_writeback(struct writeback_control *wbc, - struct fscache_cookie *cookie) -{ - if (wbc->unpinned_fscache_wb) - fscache_unuse_cookie(cookie, NULL, NULL); -} - -/** - * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode - * @cookie: The cookie referring to the cache object - * @inode: The inode to clean up - * @aux: Auxiliary data to apply to the inode - * - * Clear any writeback resources held by an inode when the inode is evicted. 
- * This must be called before clear_inode() is called. - */ -static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie, - struct inode *inode, - const void *aux) -{ - if (inode->i_state & I_PINNING_FSCACHE_WB) { - loff_t i_size = i_size_read(inode); - fscache_unuse_cookie(cookie, aux, &i_size); - } -} - /** * fscache_note_page_release - Note that a netfs page got released * @cookie: The cookie corresponding to the file diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index c895b12737a1..12f9e455d569 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -31,7 +31,7 @@ #define FSCRYPT_CONTENTS_ALIGNMENT 16 union fscrypt_policy; -struct fscrypt_info; +struct fscrypt_inode_info; struct fs_parameter; struct seq_file; @@ -59,26 +59,55 @@ struct fscrypt_name { #ifdef CONFIG_FS_ENCRYPTION -/* - * If set, the fscrypt bounce page pool won't be allocated (unless another - * filesystem needs it). Set this if the filesystem always uses its own bounce - * pages for writes and therefore won't need the fscrypt bounce page pool. - */ -#define FS_CFLG_OWN_PAGES (1U << 1) - /* Crypto operations for filesystems */ struct fscrypt_operations { - /* Set of optional flags; see above for allowed flags */ - unsigned int flags; + /* + * If set, then fs/crypto/ will allocate a global bounce page pool the + * first time an encryption key is set up for a file. The bounce page + * pool is required by the following functions: + * + * - fscrypt_encrypt_pagecache_blocks() + * - fscrypt_zeroout_range() for files not using inline crypto + * + * If the filesystem doesn't use those, it doesn't need to set this. + */ + unsigned int needs_bounce_pages : 1; /* - * If set, this is a filesystem-specific key description prefix that - * will be accepted for "logon" keys for v1 fscrypt policies, in - * addition to the generic prefix "fscrypt:". This functionality is - * deprecated, so new filesystems shouldn't set this field. + * If set, then fs/crypto/ will allow the use of encryption settings + * that assume inode numbers fit in 32 bits (i.e. + * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other + * prerequisites for these settings are also met. This is only useful + * if the filesystem wants to support inline encryption hardware that is + * limited to 32-bit or 64-bit data unit numbers and where programming + * keyslots is very slow. */ - const char *key_prefix; + unsigned int has_32bit_inodes : 1; + + /* + * If set, then fs/crypto/ will allow users to select a crypto data unit + * size that is less than the filesystem block size. This is done via + * the log2_data_unit_size field of the fscrypt policy. This flag is + * not compatible with filesystems that encrypt variable-length blocks + * (i.e. blocks that aren't all equal to filesystem's block size), for + * example as a result of compression. It's also not compatible with + * the fscrypt_encrypt_block_inplace() and + * fscrypt_decrypt_block_inplace() functions. + */ + unsigned int supports_subblock_data_units : 1; + + /* + * This field exists only for backwards compatibility reasons and should + * only be set by the filesystems that are setting it already. It + * contains the filesystem-specific key description prefix that is + * accepted for "logon" keys for v1 fscrypt policies. This + * functionality is deprecated in favor of the generic prefix + * "fscrypt:", which itself is deprecated in favor of the filesystem + * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. 
Filesystems that + * are newly adding fscrypt support should not set this field. + */ + const char *legacy_key_prefix; /* * Get the fscrypt context of the given inode. @@ -146,21 +175,6 @@ struct fscrypt_operations { bool (*has_stable_inodes)(struct super_block *sb); /* - * Get the number of bits that the filesystem uses to represent inode - * numbers and file logical block numbers. - * - * By default, both of these are assumed to be 64-bit. This function - * can be implemented to declare that either or both of these numbers is - * shorter, which may allow the use of the - * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of - * inline crypto hardware whose maximum DUN length is less than 64 bits - * (e.g., eMMC v5.2 spec compliant hardware). This function only needs - * to be implemented if support for one of these features is needed. - */ - void (*get_ino_and_lblk_bits)(struct super_block *sb, - int *ino_bits_ret, int *lblk_bits_ret); - - /* * Return an array of pointers to the block devices to which the * filesystem may write encrypted file contents, NULL if the filesystem * only has a single such block device, or an ERR_PTR() on error. @@ -178,7 +192,8 @@ struct fscrypt_operations { unsigned int *num_devs); }; -static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) +static inline struct fscrypt_inode_info * +fscrypt_get_inode_info(const struct inode *inode) { /* * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info(). @@ -390,7 +405,8 @@ static inline void fscrypt_set_ops(struct super_block *sb, } #else /* !CONFIG_FS_ENCRYPTION */ -static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) +static inline struct fscrypt_inode_info * +fscrypt_get_inode_info(const struct inode *inode) { return NULL; } @@ -868,7 +884,7 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) */ static inline bool fscrypt_has_encryption_key(const struct inode *inode) { - return fscrypt_get_info(inode) != NULL; + return fscrypt_get_inode_info(inode) != NULL; } /** diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index ed48e4f1e755..8300a5286988 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -96,35 +96,73 @@ static inline int fsnotify_file(struct file *file, __u32 mask) if (file->f_mode & FMODE_NONOTIFY) return 0; - /* Overlayfs internal files have fake f_path */ - path = file_real_path(file); + path = &file->f_path; return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); } -/* Simple call site for access decisions */ -static inline int fsnotify_perm(struct file *file, int mask) +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +/* + * fsnotify_file_area_perm - permission hook before access to file range + */ +static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, + const loff_t *ppos, size_t count) { - int ret; - __u32 fsnotify_mask = 0; + __u32 fsnotify_mask = FS_ACCESS_PERM; + + /* + * filesystem may be modified in the context of permission events + * (e.g. by HSM filling a file on access), so sb freeze protection + * must not be held. 
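For a sense of the call sites: the read path invokes the range hook before any data is copied. The my_read_iter() wrapper below is invented for illustration and is not how fs/read_write.c is actually structured.

/* Illustrative caller: fail the read if a permission event denies it. */
static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	int ret;

	ret = fsnotify_file_area_perm(file, MAY_READ, &iocb->ki_pos,
				      iov_iter_count(to));
	if (ret)
		return ret;

	return generic_file_read_iter(iocb, to);
}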
+ */ + lockdep_assert_once(file_write_not_started(file)); - if (!(mask & (MAY_READ | MAY_OPEN))) + if (!(perm_mask & MAY_READ)) return 0; - if (mask & MAY_OPEN) { - fsnotify_mask = FS_OPEN_PERM; + return fsnotify_file(file, fsnotify_mask); +} - if (file->f_flags & __FMODE_EXEC) { - ret = fsnotify_file(file, FS_OPEN_EXEC_PERM); +/* + * fsnotify_file_perm - permission hook before file access + */ +static inline int fsnotify_file_perm(struct file *file, int perm_mask) +{ + return fsnotify_file_area_perm(file, perm_mask, NULL, 0); +} - if (ret) - return ret; - } - } else if (mask & MAY_READ) { - fsnotify_mask = FS_ACCESS_PERM; +/* + * fsnotify_open_perm - permission hook before file open + */ +static inline int fsnotify_open_perm(struct file *file) +{ + int ret; + + if (file->f_flags & __FMODE_EXEC) { + ret = fsnotify_file(file, FS_OPEN_EXEC_PERM); + if (ret) + return ret; } - return fsnotify_file(file, fsnotify_mask); + return fsnotify_file(file, FS_OPEN_PERM); +} + +#else +static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, + const loff_t *ppos, size_t count) +{ + return 0; +} + +static inline int fsnotify_file_perm(struct file *file, int perm_mask) +{ + return 0; +} + +static inline int fsnotify_open_perm(struct file *file) +{ + return 0; } +#endif /* * fsnotify_link_count - inode's link count changed diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index c0892d75ce33..7f63be5ca0f1 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -472,10 +472,8 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; struct fsnotify_mark_connector { spinlock_t lock; unsigned short type; /* Type of object [lock] */ -#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01 #define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02 unsigned short flags; /* flags [lock] */ - __kernel_fsid_t fsid; /* fsid of filesystem containing object */ union { /* Object pointer [lock] */ fsnotify_connp_t *obj; @@ -530,6 +528,8 @@ struct fsnotify_mark { #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100 #define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200 #define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400 +#define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800 +#define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000 unsigned int flags; /* flags [mark->lock] */ }; @@ -763,11 +763,10 @@ extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, /* attach the mark to the object */ extern int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp, unsigned int obj_type, - int add_flags, __kernel_fsid_t *fsid); + int add_flags); extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, fsnotify_connp_t *connp, - unsigned int obj_type, int add_flags, - __kernel_fsid_t *fsid); + unsigned int obj_type, int add_flags); /* attach the mark to the inode */ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, @@ -775,15 +774,14 @@ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, int add_flags) { return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_INODE, add_flags, NULL); + FSNOTIFY_OBJ_TYPE_INODE, add_flags); } static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, struct inode *inode, int add_flags) { return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_INODE, add_flags, - NULL); + FSNOTIFY_OBJ_TYPE_INODE, add_flags); } /* given a group and a mark, flag mark to be freed when all references are dropped */ diff --git a/include/linux/fw_table.h 
b/include/linux/fw_table.h new file mode 100644 index 000000000000..95421860397a --- /dev/null +++ b/include/linux/fw_table.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * fw_tables.h - Parsing support for ACPI and ACPI-like tables provided by + * platform or device firmware + * + * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2023 Intel Corp. + */ +#ifndef _FW_TABLE_H_ +#define _FW_TABLE_H_ + +union acpi_subtable_headers; + +typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header, + const unsigned long end); + +typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header, + void *arg, const unsigned long end); + +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + acpi_tbl_entry_handler_arg handler_arg; + void *arg; + int count; +}; + +union fw_table_header { + struct acpi_table_header acpi; + struct acpi_table_cdat cdat; +}; + +union acpi_subtable_headers { + struct acpi_subtable_header common; + struct acpi_hmat_structure hmat; + struct acpi_prmt_module_header prmt; + struct acpi_cedt_header cedt; + struct acpi_cdat_header cdat; +}; + +int acpi_parse_entries_array(char *id, unsigned long table_size, + union fw_table_header *table_header, + struct acpi_subtable_proc *proc, + int proc_num, unsigned int max_entries); + +int cdat_table_parse(enum acpi_cdat_type type, + acpi_tbl_entry_handler_arg handler_arg, void *arg, + struct acpi_table_cdat *table_header); + +/* CXL is the only non-ACPI consumer of the FIRMWARE_TABLE library */ +#if IS_ENABLED(CONFIG_ACPI) && !IS_ENABLED(CONFIG_CXL_BUS) +#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x) +#define __init_or_fwtbl_lib __init_or_acpilib +#else +#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL) +#define __init_or_fwtbl_lib +#endif + +#endif diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 5700451b300f..2a72f55d26eb 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -41,6 +41,8 @@ struct device; struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; + + /* The below is used solely by device links, don't use otherwise */ struct device *dev; struct list_head suppliers; struct list_head consumers; diff --git a/include/linux/gameport.h b/include/linux/gameport.h index 0a221e768ea4..07e370113b2b 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h @@ -63,7 +63,7 @@ struct gameport_driver { int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode); void gameport_close(struct gameport *gameport); -#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) +#if IS_REACHABLE(CONFIG_GAMEPORT) void __gameport_register_port(struct gameport *gameport, struct module *owner); /* use a define to avoid include chaining to get THIS_MODULE */ diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h index 107613f7d792..847413164738 100644 --- a/include/linux/generic-radix-tree.h +++ b/include/linux/generic-radix-tree.h @@ -38,6 +38,7 @@ #include <asm/page.h> #include <linux/bug.h> +#include <linux/limits.h> #include <linux/log2.h> #include <linux/math.h> #include <linux/types.h> @@ -116,6 +117,11 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size) #define __genradix_cast(_radix) (typeof((_radix)->type[0]) *) #define __genradix_obj_size(_radix) sizeof((_radix)->type[0]) +#define __genradix_objs_per_page(_radix) \ + (PAGE_SIZE / 
sizeof((_radix)->type[0])) +#define __genradix_page_remainder(_radix) \ + (PAGE_SIZE % sizeof((_radix)->type[0])) + #define __genradix_idx_to_offset(_radix, _idx) \ __idx_to_offset(_idx, __genradix_obj_size(_radix)) @@ -179,11 +185,35 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t); #define genradix_iter_peek(_iter, _radix) \ (__genradix_cast(_radix) \ __genradix_iter_peek(_iter, &(_radix)->tree, \ - PAGE_SIZE / __genradix_obj_size(_radix))) + __genradix_objs_per_page(_radix))) + +void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *, + size_t, size_t); + +/** + * genradix_iter_peek_prev - get first entry at or below iterator's current + * position + * @_iter: a genradix_iter + * @_radix: genradix being iterated over + * + * If no more entries exist at or below @_iter's current position, returns NULL + */ +#define genradix_iter_peek_prev(_iter, _radix) \ + (__genradix_cast(_radix) \ + __genradix_iter_peek_prev(_iter, &(_radix)->tree, \ + __genradix_objs_per_page(_radix), \ + __genradix_obj_size(_radix) + \ + __genradix_page_remainder(_radix))) static inline void __genradix_iter_advance(struct genradix_iter *iter, size_t obj_size) { + if (iter->offset + obj_size < iter->offset) { + iter->offset = SIZE_MAX; + iter->pos = SIZE_MAX; + return; + } + iter->offset += obj_size; if (!is_power_of_2(obj_size) && @@ -196,6 +226,25 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter, #define genradix_iter_advance(_iter, _radix) \ __genradix_iter_advance(_iter, __genradix_obj_size(_radix)) +static inline void __genradix_iter_rewind(struct genradix_iter *iter, + size_t obj_size) +{ + if (iter->offset == 0 || + iter->offset == SIZE_MAX) { + iter->offset = SIZE_MAX; + return; + } + + if ((iter->offset & (PAGE_SIZE - 1)) == 0) + iter->offset -= PAGE_SIZE % obj_size; + + iter->offset -= obj_size; + iter->pos--; +} + +#define genradix_iter_rewind(_iter, _radix) \ + __genradix_iter_rewind(_iter, __genradix_obj_size(_radix)) + #define genradix_for_each_from(_radix, _iter, _p, _start) \ for (_iter = genradix_iter_init(_radix, _start); \ (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \ @@ -213,6 +262,23 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter, #define genradix_for_each(_radix, _iter, _p) \ genradix_for_each_from(_radix, _iter, _p, 0) +#define genradix_last_pos(_radix) \ + (SIZE_MAX / PAGE_SIZE * __genradix_objs_per_page(_radix) - 1) + +/** + * genradix_for_each_reverse - iterate over entry in a genradix, reverse order + * @_radix: genradix to iterate over + * @_iter: a genradix_iter to track current position + * @_p: pointer to genradix entry type + * + * On every iteration, @_p will point to the current entry, and @_iter.pos + * will be the current entry's index. 
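Putting the iterator pieces together (the macro itself is defined just below), a reverse walk over a genradix of u64 counters might look like this sketch; the names are illustrative:

/* Walk from the last possible position back to the first populated slot. */
static DEFINE_GENRADIX(counters, u64);	/* zero-initialized: empty tree */

static void dump_counters_backwards(void)
{
	struct genradix_iter iter;
	u64 *p;

	genradix_for_each_reverse(&counters, iter, p)
		pr_info("pos %zu: %llu\n", iter.pos, *p);
}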
+ */ +#define genradix_for_each_reverse(_radix, _iter, _p) \ + for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\ + (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\ + genradix_iter_rewind(&_iter, _radix)) + int __genradix_prealloc(struct __genradix *, size_t, gfp_t); /** diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 665f06675c83..de292a007138 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -8,6 +8,7 @@ #include <linux/topology.h> struct vm_area_struct; +struct mempolicy; /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) @@ -262,7 +263,9 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, #ifdef CONFIG_NUMA struct page *alloc_pages(gfp_t gfp, unsigned int order); -struct folio *folio_alloc(gfp_t gfp, unsigned order); +struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, + struct mempolicy *mpol, pgoff_t ilx, int nid); +struct folio *folio_alloc(gfp_t gfp, unsigned int order); struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage); #else @@ -270,6 +273,11 @@ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order) { return alloc_pages_node(numa_node_id(), gfp_mask, order); } +static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, + struct mempolicy *mpol, pgoff_t ilx, int nid) +{ + return alloc_pages(gfp, order); +} static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order) { return __folio_alloc_node(gfp, order, numa_node_id()); @@ -320,11 +328,13 @@ extern void page_frag_free(void *addr); #define free_page(addr) free_pages((addr), 0) void page_alloc_init_cpuhp(void); +int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); void drain_local_pages(struct zone *zone); void page_alloc_init_late(void); +void setup_pcp_cacheinfo(void); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h index 6583a58670c5..1b6053da8754 100644 --- a/include/linux/gfp_types.h +++ b/include/linux/gfp_types.h @@ -162,25 +162,25 @@ typedef unsigned int __bitwise gfp_t; * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. * * The default allocator behavior depends on the request size. We have a concept - * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER). + * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER). * !costly allocations are too essential to fail so they are implicitly * non-failing by default (with some exceptions like OOM victims might fail so * the caller still has to check for failures) while costly requests try to be * not disruptive and back off even without invoking the OOM killer. * The following three modifiers might be used to override some of these - * implicit rules + * implicit rules. * * %__GFP_NORETRY: The VM implementation will try only very lightweight * memory direct reclaim to get some memory under memory pressure (thus * it can sleep). It will avoid disruptive actions like OOM killer. The * caller must handle the failure which is quite likely to happen under * heavy memory pressure. The flag is suitable when failure can easily be - * handled at small cost, such as reduced throughput + * handled at small cost, such as reduced throughput. 
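In practice this means a __GFP_NORETRY caller treats failure as routine; a hedged sketch of the usual fall-back pattern:

/* Opportunistic high-order attempt with a robust single-page fallback. */
static void *my_alloc_buffer(unsigned int order)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY, order);
	if (!page)
		page = alloc_pages(GFP_KERNEL, 0);	/* small but reliable */

	return page ? page_address(page) : NULL;
}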
* * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim * procedures that have previously failed if there is some indication - * that progress has been made else where. It can wait for other - * tasks to attempt high level approaches to freeing memory such as + * that progress has been made elsewhere. It can wait for other + * tasks to attempt high-level approaches to freeing memory such as * compaction (which removes fragmentation) and page-out. * There is still a definite limit to the number of retries, but it is * a larger limit than with %__GFP_NORETRY. @@ -230,7 +230,7 @@ typedef unsigned int __bitwise gfp_t; * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting * memory tags at the same time as zeroing memory has minimal additional - * performace impact. + * performance impact. * * %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation. * Used for userspace and vmalloc pages; the latter are unpoisoned by @@ -274,7 +274,8 @@ typedef unsigned int __bitwise gfp_t; * accounted to kmemcg. * * %GFP_NOWAIT is for kernel allocations that should not stall for direct - * reclaim, start physical IO or use any filesystem callback. + * reclaim, start physical IO or use any filesystem callback. It is very + * likely to fail to allocate memory, even for very small allocations. * * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages * that do not require the starting of any physical IO. @@ -325,7 +326,7 @@ typedef unsigned int __bitwise gfp_t; #define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) #define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) -#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN) #define GFP_NOIO (__GFP_RECLAIM) #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 1c4385a00f88..db2dfbae8edb 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -159,7 +159,6 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, int gpiod_set_config(struct gpio_desc *desc, unsigned long config); int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce); -int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); void gpiod_toggle_active_low(struct gpio_desc *desc); int gpiod_is_active_low(const struct gpio_desc *desc); @@ -494,13 +493,6 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int deboun return -ENOSYS; } -static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) -{ - /* GPIO can never have been requested */ - WARN_ON(desc); - return -ENOSYS; -} - static inline void gpiod_toggle_active_low(struct gpio_desc *desc) { /* GPIO can never have been requested */ @@ -614,8 +606,6 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev); int devm_acpi_dev_add_driver_gpios(struct device *dev, const struct acpi_gpio_mapping *gpios); -struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label); - #else /* CONFIG_GPIOLIB && CONFIG_ACPI */ #include <linux/err.h> @@ -633,12 +623,6 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, return -ENXIO; } -static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, - char *label) 
-{ - return ERR_PTR(-ENOSYS); -} - #endif /* CONFIG_GPIOLIB && CONFIG_ACPI */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 4f0c5d62c8f3..9a5c6c76e653 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -3,6 +3,8 @@ #define __LINUX_GPIO_DRIVER_H #include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/err.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/irqhandler.h> @@ -529,29 +531,49 @@ struct gpio_chip { #endif /* CONFIG_OF_GPIO */ }; -extern const char *gpiochip_is_requested(struct gpio_chip *gc, - unsigned int offset); +char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset); + + +struct _gpiochip_for_each_data { + const char **label; + unsigned int *i; +}; + +DEFINE_CLASS(_gpiochip_for_each_data, + struct _gpiochip_for_each_data, + if (*_T.label) kfree(*_T.label), + ({ + struct _gpiochip_for_each_data _data = { label, i }; + *_data.i = 0; + _data; + }), + const char **label, int *i) /** * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range - * @chip: the chip to query - * @i: loop variable - * @base: first GPIO in the range - * @size: amount of GPIOs to check starting from @base - * @label: label of current GPIO + * @_chip: the chip to query + * @_i: loop variable + * @_base: first GPIO in the range + * @_size: amount of GPIOs to check starting from @base + * @_label: label of current GPIO */ -#define for_each_requested_gpio_in_range(chip, i, base, size, label) \ - for (i = 0; i < size; i++) \ - if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else +#define for_each_requested_gpio_in_range(_chip, _i, _base, _size, _label) \ + for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \ + *_data.i < _size; \ + (*_data.i)++, kfree(*(_data.label)), *_data.label = NULL) \ + if ((*_data.label = \ + gpiochip_dup_line_label(_chip, _base + *_data.i)) == NULL) {} \ + else if (IS_ERR(*_data.label)) {} \ + else /* Iterates over all requested GPIO of the given @chip */ #define for_each_requested_gpio(chip, i, label) \ for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label) /* add/remove chips */ -extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, - struct lock_class_key *lock_key, - struct lock_class_key *request_key); +int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, + struct lock_class_key *lock_key, + struct lock_class_key *request_key); /** * gpiochip_add_data() - register a gpio_chip @@ -599,13 +621,23 @@ static inline int gpiochip_add(struct gpio_chip *gc) { return gpiochip_add_data(gc, NULL); } -extern void gpiochip_remove(struct gpio_chip *gc); -extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data, - struct lock_class_key *lock_key, - struct lock_class_key *request_key); +void gpiochip_remove(struct gpio_chip *gc); +int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, + void *data, struct lock_class_key *lock_key, + struct lock_class_key *request_key); + +struct gpio_device *gpio_device_find(void *data, + int (*match)(struct gpio_chip *gc, void *data)); +struct gpio_device *gpio_device_find_by_label(const char *label); +struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode); + +struct gpio_device *gpio_device_get(struct gpio_device *gdev); +void gpio_device_put(struct gpio_device *gdev); -extern struct gpio_chip *gpiochip_find(void *data, - int (*match)(struct gpio_chip *gc, void 
*data)); +DEFINE_FREE(gpio_device_put, struct gpio_device *, + if (!IS_ERR_OR_NULL(_T)) gpio_device_put(_T)) + +struct device *gpio_device_to_device(struct gpio_device *gdev); bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset); int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset); @@ -690,7 +722,6 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc, #else #include <asm/bug.h> -#include <asm/errno.h> static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc, struct irq_domain *domain) @@ -758,18 +789,26 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, enum gpiod_flags dflags); void gpiochip_free_own_desc(struct gpio_desc *desc); +struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum); +struct gpio_desc * +gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum); + +struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev); + #ifdef CONFIG_GPIOLIB /* lock/unlock as IRQ */ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset); void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset); - struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); +struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc); -#else /* CONFIG_GPIOLIB */ +/* struct gpio_device getters */ +int gpio_device_get_base(struct gpio_device *gdev); +const char *gpio_device_get_label(struct gpio_device *gdev); -#include <linux/err.h> +#else /* CONFIG_GPIOLIB */ #include <asm/bug.h> diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index 3f84aeb81e48..80fa930b04c6 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -21,6 +21,7 @@ struct device; * disable button via sysfs * @value: axis value for %EV_ABS * @irq: Irq number in case of interrupt keys + * @wakeirq: Optional dedicated wake-up interrupt */ struct gpio_keys_button { unsigned int code; @@ -34,6 +35,7 @@ struct gpio_keys_button { bool can_disable; int value; unsigned int irq; + unsigned int wakeirq; }; /** diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h new file mode 100644 index 000000000000..f316c8d0f3fc --- /dev/null +++ b/include/linux/habanalabs/cpucp_if.h @@ -0,0 +1,1423 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2020-2023 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef CPUCP_IF_H +#define CPUCP_IF_H + +#include <linux/types.h> +#include <linux/if_ether.h> + +#include "hl_boot_if.h" + +#define NUM_HBM_PSEUDO_CH 2 +#define NUM_HBM_CH_PER_DEV 8 +#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0 +#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001 +#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1 +#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002 +#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2 +#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004 +#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3 +#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008 +#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4 +#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010 +#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5 +#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020 +#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6 +#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0 + +#define PLL_MAP_MAX_BITS 128 +#define PLL_MAP_LEN (PLL_MAP_MAX_BITS / 8) + +enum eq_event_id { + EQ_EVENT_NIC_STS_REQUEST = 0, + EQ_EVENT_PWR_MODE_0, + EQ_EVENT_PWR_MODE_1, + EQ_EVENT_PWR_MODE_2, + EQ_EVENT_PWR_MODE_3, + EQ_EVENT_PWR_BRK_ENTRY, + EQ_EVENT_PWR_BRK_EXIT, + EQ_EVENT_HEARTBEAT, +}; + +/* + * info of the pkt queue pointers in the first async occurrence + */ +struct cpucp_pkt_sync_err { + __le32 pi; + __le32 ci; +}; + +struct hl_eq_hbm_ecc_data { + /* SERR counter */ + __le32 sec_cnt; + /* DERR counter */ + __le32 dec_cnt; + /* Supplemental Information according to the mask bits */ + __le32 hbm_ecc_info; + /* Address in hbm where the ecc happened */ + __le32 first_addr; + /* SERR continuous address counter */ + __le32 sec_cont_cnt; + __le32 pad; +}; + +/* + * EVENT QUEUE + */ + +struct hl_eq_header { + __le32 reserved; + __le32 ctl; +}; + +struct hl_eq_ecc_data { + __le64 ecc_address; + __le64 ecc_syndrom; + __u8 memory_wrapper_idx; + __u8 is_critical; + __le16 block_id; + __u8 pad[4]; +}; + +enum hl_sm_sei_cause { + SM_SEI_SO_OVERFLOW, + SM_SEI_LBW_4B_UNALIGNED, + SM_SEI_AXI_RESPONSE_ERR +}; + +struct hl_eq_sm_sei_data { + __le32 sei_log; + /* enum hl_sm_sei_cause */ + __u8 sei_cause; + __u8 pad[3]; +}; + +enum hl_fw_alive_severity { + FW_ALIVE_SEVERITY_MINOR, + FW_ALIVE_SEVERITY_CRITICAL +}; + +struct hl_eq_fw_alive { + __le64 uptime_seconds; + __le32 process_id; + __le32 thread_id; + /* enum hl_fw_alive_severity */ + __u8 severity; + __u8 pad[7]; +}; + +struct hl_eq_intr_cause { + __le64 intr_cause_data; +}; + +struct hl_eq_pcie_drain_ind_data { + struct hl_eq_intr_cause intr_cause; + __le64 drain_wr_addr_lbw; + __le64 drain_rd_addr_lbw; + __le64 drain_wr_addr_hbw; + __le64 drain_rd_addr_hbw; +}; + +struct hl_eq_razwi_lbw_info_regs { + __le32 rr_aw_razwi_reg; + __le32 rr_aw_razwi_id_reg; + __le32 rr_ar_razwi_reg; + __le32 rr_ar_razwi_id_reg; +}; + +struct hl_eq_razwi_hbw_info_regs { + __le32 rr_aw_razwi_hi_reg; + __le32 rr_aw_razwi_lo_reg; + __le32 rr_aw_razwi_id_reg; + __le32 rr_ar_razwi_hi_reg; + __le32 rr_ar_razwi_lo_reg; + __le32 rr_ar_razwi_id_reg; +}; + +/* razwi_happened masks */ +#define RAZWI_HAPPENED_HBW 0x1 +#define RAZWI_HAPPENED_LBW 0x2 +#define RAZWI_HAPPENED_AW 0x4 +#define RAZWI_HAPPENED_AR 0x8 + +struct hl_eq_razwi_info { + __le32 razwi_happened_mask; + union { + struct hl_eq_razwi_lbw_info_regs lbw; + struct hl_eq_razwi_hbw_info_regs hbw; + }; + __le32 pad; +}; + +struct hl_eq_razwi_with_intr_cause { + struct hl_eq_razwi_info razwi_info; + struct hl_eq_intr_cause intr_cause; +}; + +#define HBM_CA_ERR_CMD_LIFO_LEN 8 +#define HBM_RD_ERR_DATA_LIFO_LEN 8 +#define 
HBM_WR_PAR_CMD_LIFO_LEN 11
+
+enum hl_hbm_sei_cause {
+ /* Command/address parity error event is split into 2 events due to
+ * size limitation: ODD suffix for odd HBM CK_t cycles and EVEN suffix
+ * for even HBM CK_t cycles
+ */
+ HBM_SEI_CMD_PARITY_EVEN,
+ HBM_SEI_CMD_PARITY_ODD,
+ /* Read errors can be reflected as a combination of SERR/DERR/parity
+ * errors. Therefore, we define one event for all read error types.
+ * LKD will perform further processing.
+ */
+ HBM_SEI_READ_ERR,
+ HBM_SEI_WRITE_DATA_PARITY_ERR,
+ HBM_SEI_CATTRIP,
+ HBM_SEI_MEM_BIST_FAIL,
+ HBM_SEI_DFI,
+ HBM_SEI_INV_TEMP_READ_OUT,
+ HBM_SEI_BIST_FAIL,
+};
+
+/* Masks for parsing hl_hbm_sei_header fields */
+#define HBM_ECC_SERR_CNTR_MASK 0xFF
+#define HBM_ECC_DERR_CNTR_MASK 0xFF00
+#define HBM_RD_PARITY_CNTR_MASK 0xFF0000
+
+/* HBM index and MC index are known by the event_id */
+struct hl_hbm_sei_header {
+ union {
+ /* relevant only in case of HBM read error */
+ struct {
+ __u8 ecc_serr_cnt;
+ __u8 ecc_derr_cnt;
+ __u8 read_par_cnt;
+ __u8 reserved;
+ };
+ /* All other cases */
+ __le32 cnt;
+ };
+ __u8 sei_cause; /* enum hl_hbm_sei_cause */
+ __u8 mc_channel; /* range: 0-3 */
+ __u8 mc_pseudo_channel; /* range: 0-7 */
+ __u8 is_critical;
+};
+
+#define HBM_RD_ADDR_SID_SHIFT 0
+#define HBM_RD_ADDR_SID_MASK 0x1
+#define HBM_RD_ADDR_BG_SHIFT 1
+#define HBM_RD_ADDR_BG_MASK 0x6
+#define HBM_RD_ADDR_BA_SHIFT 3
+#define HBM_RD_ADDR_BA_MASK 0x18
+#define HBM_RD_ADDR_COL_SHIFT 5
+#define HBM_RD_ADDR_COL_MASK 0x7E0
+#define HBM_RD_ADDR_ROW_SHIFT 11
+#define HBM_RD_ADDR_ROW_MASK 0x3FFF800
+
+struct hbm_rd_addr {
+ union {
+ /* bit fields are only for FW use */
+ struct {
+ u32 dbg_rd_err_addr_sid:1;
+ u32 dbg_rd_err_addr_bg:2;
+ u32 dbg_rd_err_addr_ba:2;
+ u32 dbg_rd_err_addr_col:6;
+ u32 dbg_rd_err_addr_row:15;
+ u32 reserved:6;
+ };
+ __le32 rd_addr_val;
+ };
+};
+
+#define HBM_RD_ERR_BEAT_SHIFT 2
+/* dbg_rd_err_misc fields: */
+/* Read parity is calculated per DW on every beat */
+#define HBM_RD_ERR_PAR_ERR_BEAT0_SHIFT 0
+#define HBM_RD_ERR_PAR_ERR_BEAT0_MASK 0x3
+#define HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT 8
+#define HBM_RD_ERR_PAR_DATA_BEAT0_MASK 0x300
+/* ECC is calculated per PC on every beat */
+#define HBM_RD_ERR_SERR_BEAT0_SHIFT 16
+#define HBM_RD_ERR_SERR_BEAT0_MASK 0x10000
+#define HBM_RD_ERR_DERR_BEAT0_SHIFT 24
+#define HBM_RD_ERR_DERR_BEAT0_MASK 0x100000
+
+struct hl_eq_hbm_sei_read_err_intr_info {
+ /* DFI_RD_ERR_REP_ADDR */
+ struct hbm_rd_addr dbg_rd_err_addr;
+ /* DFI_RD_ERR_REP_ERR */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 dbg_rd_err_par:8;
+ u32 dbg_rd_err_par_data:8;
+ u32 dbg_rd_err_serr:4;
+ u32 dbg_rd_err_derr:4;
+ u32 reserved:8;
+ };
+ __le32 dbg_rd_err_misc;
+ };
+ /* DFI_RD_ERR_REP_DM */
+ __le32 dbg_rd_err_dm;
+ /* DFI_RD_ERR_REP_SYNDROME */
+ __le32 dbg_rd_err_syndrome;
+ /* DFI_RD_ERR_REP_DATA */
+ __le32 dbg_rd_err_data[HBM_RD_ERR_DATA_LIFO_LEN];
+};
+
+struct hl_eq_hbm_sei_ca_par_intr_info {
+ /* 14 LSBs */
+ __le16 dbg_row[HBM_CA_ERR_CMD_LIFO_LEN];
+ /* 18 LSBs */
+ __le32 dbg_col[HBM_CA_ERR_CMD_LIFO_LEN];
+};
+
+#define WR_PAR_LAST_CMD_COL_SHIFT 0
+#define WR_PAR_LAST_CMD_COL_MASK 0x3F
+#define WR_PAR_LAST_CMD_BG_SHIFT 6
+#define WR_PAR_LAST_CMD_BG_MASK 0xC0
+#define WR_PAR_LAST_CMD_BA_SHIFT 8
+#define WR_PAR_LAST_CMD_BA_MASK 0x300
+#define WR_PAR_LAST_CMD_SID_SHIFT 10
+#define WR_PAR_LAST_CMD_SID_MASK 0x400
+
+/* Row address isn't latched */
+struct hbm_sei_wr_cmd_address {
+ /* DFI_DERR_LAST_CMD */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 col:6;
+ u32 bg:2;
+ u32 ba:2;
+ u32 sid:1;
+ u32 reserved:21;
+ };
+ __le32 dbg_wr_cmd_addr;
+ };
+};
+
+struct hl_eq_hbm_sei_wr_par_intr_info {
+ /* entry 0: WR command address from the 1st cycle prior to the error
+ * entry 1: WR command address from the 2nd cycle prior to the error
+ * and so on...
+ */
+ struct hbm_sei_wr_cmd_address dbg_last_wr_cmds[HBM_WR_PAR_CMD_LIFO_LEN];
+ /* derr[0:1] - 1st HBM cycle DERR output
+ * derr[2:3] - 2nd HBM cycle DERR output
+ */
+ __u8 dbg_derr;
+ /* extend to reach 8B */
+ __u8 pad[3];
+};
+
+/*
+ * this struct represents the following sei causes:
+ * command parity, ECC double error, ECC single error, dfi error, cattrip,
+ * temperature read-out, read parity error and write parity error.
+ * some only use the header while some have extra data.
+ */
+struct hl_eq_hbm_sei_data {
+ struct hl_hbm_sei_header hdr;
+ union {
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_even_info;
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_odd_info;
+ struct hl_eq_hbm_sei_read_err_intr_info read_err_info;
+ struct hl_eq_hbm_sei_wr_par_intr_info wr_parity_info;
+ };
+};
+
+/* Engine/farm arc interrupt type */
+enum hl_engine_arc_interrupt_type {
+ /* Qman/farm ARC DCCM QUEUE FULL interrupt type */
+ ENGINE_ARC_DCCM_QUEUE_FULL_IRQ = 1
+};
+
+/* Data structure specifies details of payload of DCCM QUEUE FULL interrupt */
+struct hl_engine_arc_dccm_queue_full_irq {
+ /* Queue index value which caused DCCM QUEUE FULL */
+ __le32 queue_index;
+ __le32 pad;
+};
+
+/* Data structure specifies details of QM/FARM ARC interrupt */
+struct hl_eq_engine_arc_intr_data {
+ /* ARC engine id e.g. DCORE0_TPC0_QM_ARC, DCORE0_TCP1_QM_ARC */
+ __le32 engine_id;
+ __le32 intr_type; /* enum hl_engine_arc_interrupt_type */
+ /* More info related to the interrupt e.g. queue index
+ * in case of DCCM_QUEUE_FULL interrupt.
+ */ + __le64 payload; + __le64 pad[5]; +}; + +#define ADDR_DEC_ADDRESS_COUNT_MAX 4 + +/* Data structure specifies details of ADDR_DEC interrupt */ +struct hl_eq_addr_dec_intr_data { + struct hl_eq_intr_cause intr_cause; + __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX]; + __u8 addr_cnt; + __u8 pad[7]; +}; + +struct hl_eq_entry { + struct hl_eq_header hdr; + union { + __le64 data_placeholder; + struct hl_eq_ecc_data ecc_data; + struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */ + struct hl_eq_sm_sei_data sm_sei_data; + struct cpucp_pkt_sync_err pkt_sync_err; + struct hl_eq_fw_alive fw_alive; + struct hl_eq_intr_cause intr_cause; + struct hl_eq_pcie_drain_ind_data pcie_drain_ind_data; + struct hl_eq_razwi_info razwi_info; + struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause; + struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */ + struct hl_eq_engine_arc_intr_data arc_data; + struct hl_eq_addr_dec_intr_data addr_dec; + __le64 data[7]; + }; +}; + +#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry) + +#define EQ_CTL_READY_SHIFT 31 +#define EQ_CTL_READY_MASK 0x80000000 + +#define EQ_CTL_EVENT_TYPE_SHIFT 16 +#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000 + +#define EQ_CTL_INDEX_SHIFT 0 +#define EQ_CTL_INDEX_MASK 0x0000FFFF + +enum pq_init_status { + PQ_INIT_STATUS_NA = 0, + PQ_INIT_STATUS_READY_FOR_CP, + PQ_INIT_STATUS_READY_FOR_HOST, + PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI, + PQ_INIT_STATUS_LEN_NOT_POWER_OF_TWO_ERR, + PQ_INIT_STATUS_ILLEGAL_Q_ADDR_ERR +}; + +/* + * CpuCP Primary Queue Packets + * + * During normal operation, the host's kernel driver needs to send various + * messages to CpuCP, usually either to SET some value into a H/W periphery or + * to GET the current value of some H/W periphery. For example, SET the + * frequency of MME/TPC and GET the value of the thermal sensor. + * + * These messages can be initiated either by the User application or by the + * host's driver itself, e.g. power management code. In either case, the + * communication from the host's driver to CpuCP will *always* be in + * synchronous mode, meaning that the host will send a single message and poll + * until the message was acknowledged and the results are ready (if results are + * needed). + * + * This means that only a single message can be sent at a time and the host's + * driver must wait for its result before sending the next message. Having said + * that, because these are control messages which are sent in a relatively low + * frequency, this limitation seems acceptable. It's important to note that + * in case of multiple devices, messages to different devices *can* be sent + * at the same time. + * + * The message, inputs/outputs (if relevant) and fence object will be located + * on the device DDR at an address that will be determined by the host's driver. + * During device initialization phase, the host will pass to CpuCP that address. + * Most of the message types will contain inputs/outputs inside the message + * itself. The common part of each message will contain the opcode of the + * message (its type) and a field representing a fence object. + * + * When the host's driver wishes to send a message to CPU CP, it will write the + * message contents to the device DDR, clear the fence object and then write to + * the PSOC_ARC1_AUX_SW_INTR, to issue interrupt 121 to ARC Management CPU. + * + * Upon receiving the interrupt (#121), CpuCP will read the message from the + * DDR. 
In case the message is a SET operation, CpuCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, CpuCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, CpuCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
+ *
+ * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
+ * so the value being put by the host's driver matches the value read by CpuCP.
+ *
+ * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
+ *
+ * Detailed description:
+ *
+ * CPUCP_PACKET_DISABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU must NOT issue PCI
+ * transactions (read/write) towards the Host CPU. This also includes
+ * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state.
+ *
+ * CPUCP_PACKET_ENABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU is allowed to issue PCI
+ * transactions towards the Host CPU, including sending MSI-X interrupts.
+ * This packet is usually sent after the device is moved to D0 state.
+ *
+ * CPUCP_PACKET_TEMPERATURE_GET -
+ * Fetch the current temperature / Max / Max Hyst / Critical /
+ * Critical Hyst of a specified thermal sensor. The packet's
+ * arguments specify the desired sensor and the field to get.
+ *
+ * CPUCP_PACKET_VOLTAGE_GET -
+ * Fetch the voltage / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_CURRENT_GET -
+ * Fetch the current / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_FAN_SPEED_GET -
+ * Fetch the speed / Max / Min of a specified fan. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_GET -
+ * Fetch the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_SET -
+ * Set the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor, type and value.
+ *
+ * CPUCP_PACKET_FREQUENCY_SET -
+ * Set the frequency of a specified PLL. The packet's arguments specify
+ * the PLL and the desired frequency. The actual frequency in the device
+ * might differ from the requested frequency.
+ *
+ * CPUCP_PACKET_FREQUENCY_GET -
+ * Fetch the frequency of a specified PLL. The packet's arguments specify
+ * the PLL.
+ *
+ * CPUCP_PACKET_LED_SET -
+ * Set the state of a specified led. The packet's arguments
+ * specify the led and the desired state.
+ *
+ * CPUCP_PACKET_I2C_WR -
+ * Write 32-bit value to I2C device. The packet's arguments specify the
+ * I2C bus, address and value.
+ *
+ * CPUCP_PACKET_I2C_RD -
+ * Read 32-bit value from I2C device. The packet's arguments specify the
+ * I2C bus and address.
+ *
+ * CPUCP_PACKET_INFO_GET -
+ * Fetch information from the device as specified in the packet's
+ * structure. The host's driver passes the max size it allows the CpuCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ -
+ * Unmask the given IRQ. The IRQ number is specified in the value field.
+ * The packet is sent after receiving an interrupt and printing its
+ * relevant information.
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * Unmask the given IRQs. The IRQ numbers are specified in an array right
+ * after the cpucp_packet structure, where its first element is the array
+ * length. The packet is sent after a soft reset was done in order to
+ * handle any interrupts that were sent during the reset process.
+ *
+ * CPUCP_PACKET_TEST -
+ * Test packet for CpuCP connectivity. The CPU will put the fence value
+ * in the result field.
+ *
+ * CPUCP_PACKET_FREQUENCY_CURR_GET -
+ * Fetch the current frequency of a specified PLL. The packet's arguments
+ * specify the PLL.
+ *
+ * CPUCP_PACKET_MAX_POWER_GET -
+ * Fetch the maximal power of the device.
+ *
+ * CPUCP_PACKET_MAX_POWER_SET -
+ * Set the maximal power of the device. The packet's arguments specify
+ * the power.
+ *
+ * CPUCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the CpuCP kernel. The buffer is specified in the
+ * addr field. The CPU will put the returned data size in the result
+ * field. In addition, the host's driver passes the max size it allows the
+ * CpuCP to write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_NIC_INFO_GET -
+ * Fetch information from the device regarding the NIC. The host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to
+ * prevent data corruption in case of mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_TEMPERATURE_SET -
+ * Set the value of the offset property of a specified thermal sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_VOLTAGE_SET -
+ * Trigger the reset_history property of a specified voltage sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_CURRENT_SET -
+ * Trigger the reset_history property of a specified current sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
+ * Get throughput of PCIe.
+ * The packet's arguments specify the transaction direction (TX/RX).
+ * The window measurement is 10[msec], and the return value is in KB/sec.
+ *
+ * CPUCP_PACKET_PCIE_REPLAY_CNT_GET
+ * Replay count measures the number of "replay" events, which is basically
+ * the number of retries done by PCIe.
+ *
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
+ * Total Energy is a measurement of energy from the time FW Linux
+ * is loaded. It is calculated by multiplying the average power
+ * by time (passed from armcp start). The units are in millijoules.
+ *
+ * CPUCP_PACKET_PLL_INFO_GET -
+ * Fetch frequencies of PLL from the required PLL IP.
+ * The packet's arguments specify the device PLL type.
+ * PLL type is the PLL from the device pll_index enum.
+ * The result is composed of 4 outputs, each is a 16-bit
+ * frequency in MHz.
+ *
+ * CPUCP_PACKET_POWER_GET -
+ * Fetch the present power consumption of the device (Current * Voltage).
+ *
+ * CPUCP_PACKET_NIC_PFC_SET -
+ * Enable/Disable the NIC PFC feature. The packet's arguments specify the
+ * NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_FAULT_GET -
+ * Fetch the current indication for local/remote faults from the NIC MAC.
+ * The result is the 32-bit value of the relevant register.
+ *
+ * CPUCP_PACKET_NIC_LPBK_SET -
+ * Enable/Disable the MAC loopback feature. The packet's arguments specify
+ * the NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_MAC_INIT -
+ * Configure the NIC MAC channels. The packet's arguments specify the
+ * NIC port and the speed.
+ *
+ * CPUCP_PACKET_MSI_INFO_SET -
+ * Set the index number for each supported MSI type going from
+ * host to device.
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ * Fetch the un/correctable counter values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE in regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
+ *
+ * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET -
+ * Fetch all HBM replaced-rows and pending-to-be-replaced rows data.
+ *
+ * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS -
+ * Fetch the status of HBM rows that are pending replacement and need a
+ * reboot to be replaced.
+ *
+ * CPUCP_PACKET_POWER_SET -
+ * Resets the power history of the device to 0.
+ *
+ * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
+ * Packet to perform engine core ASID configuration.
+ *
+ * CPUCP_PACKET_SEC_ATTEST_GET -
+ * Get the attestation data that is collected during various stages of the
+ * boot sequence. The attestation data is also hashed with some unique
+ * number (nonce) provided by the host to prevent replay attacks.
+ * The public key and certificate are also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_INFO_SIGNED_GET -
+ * Get the device information signed by the Trusted Platform device.
+ * The device info data is also hashed with some unique number (nonce) provided
+ * by the host to prevent replay attacks. The public key and certificate are
+ * also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ * Get monitors registers dump from the CpuCP kernel.
+ * The CPU will put the registers dump in a buffer allocated by the driver,
+ * whose address is passed via the CpuCP packet. In addition, the host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to prevent
+ * data corruption in case of mismatched driver/FW versions.
+ * Obsolete.
+ *
+ * CPUCP_PACKET_GENERIC_PASSTHROUGH -
+ * Generic opcode for all firmware info that is only passed to host
+ * through the LKD, without getting parsed there.
+ *
+ * CPUCP_PACKET_ACTIVE_STATUS_SET -
+ * LKD sends the FW an indication whether the device is free or in use; this
+ * indication is also reported to the BMC.
+ *
+ * CPUCP_PACKET_SOFT_RESET -
+ * Packet to perform soft-reset.
+ *
+ * CPUCP_PACKET_INTS_REGISTER -
+ * Packet to inform FW that queues have been established and LKD is ready to
+ * receive EQ events.
+ */ + +enum cpucp_packet_id { + CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */ + CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */ + CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */ + CPUCP_PACKET_VOLTAGE_GET, /* sysfs */ + CPUCP_PACKET_CURRENT_GET, /* sysfs */ + CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */ + CPUCP_PACKET_PWM_GET, /* sysfs */ + CPUCP_PACKET_PWM_SET, /* sysfs */ + CPUCP_PACKET_FREQUENCY_SET, /* sysfs */ + CPUCP_PACKET_FREQUENCY_GET, /* sysfs */ + CPUCP_PACKET_LED_SET, /* debugfs */ + CPUCP_PACKET_I2C_WR, /* debugfs */ + CPUCP_PACKET_I2C_RD, /* debugfs */ + CPUCP_PACKET_INFO_GET, /* IOCTL */ + CPUCP_PACKET_FLASH_PROGRAM_REMOVED, + CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */ + CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */ + CPUCP_PACKET_TEST, /* internal */ + CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */ + CPUCP_PACKET_MAX_POWER_GET, /* sysfs */ + CPUCP_PACKET_MAX_POWER_SET, /* sysfs */ + CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */ + CPUCP_PACKET_NIC_INFO_GET, /* internal */ + CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */ + CPUCP_PACKET_VOLTAGE_SET, /* sysfs */ + CPUCP_PACKET_CURRENT_SET, /* sysfs */ + CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */ + CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */ + CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */ + CPUCP_PACKET_PLL_INFO_GET, /* internal */ + CPUCP_PACKET_NIC_STATUS, /* internal */ + CPUCP_PACKET_POWER_GET, /* internal */ + CPUCP_PACKET_NIC_PFC_SET, /* internal */ + CPUCP_PACKET_NIC_FAULT_GET, /* internal */ + CPUCP_PACKET_NIC_LPBK_SET, /* internal */ + CPUCP_PACKET_NIC_MAC_CFG, /* internal */ + CPUCP_PACKET_MSI_INFO_SET, /* internal */ + CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */ + CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */ + CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */ + CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */ + CPUCP_PACKET_IS_IDLE_CHECK, /* internal */ + CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */ + CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */ + CPUCP_PACKET_POWER_SET, /* internal */ + CPUCP_PACKET_RESERVED, /* not used */ + CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */ + CPUCP_PACKET_RESERVED2, /* not used */ + CPUCP_PACKET_SEC_ATTEST_GET, /* internal */ + CPUCP_PACKET_INFO_SIGNED_GET, /* internal */ + CPUCP_PACKET_RESERVED4, /* not used */ + CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */ + CPUCP_PACKET_RESERVED5, /* not used */ + CPUCP_PACKET_RESERVED6, /* not used */ + CPUCP_PACKET_RESERVED7, /* not used */ + CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */ + CPUCP_PACKET_RESERVED8, /* not used */ + CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */ + CPUCP_PACKET_RESERVED9, /* not used */ + CPUCP_PACKET_RESERVED10, /* not used */ + CPUCP_PACKET_RESERVED11, /* not used */ + CPUCP_PACKET_RESERVED12, /* internal */ + CPUCP_PACKET_RESERVED13, /* internal */ + CPUCP_PACKET_SOFT_RESET, /* internal */ + CPUCP_PACKET_INTS_REGISTER, /* internal */ + CPUCP_PACKET_ID_MAX /* must be last */ +}; + +#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5 + +#define CPUCP_PKT_CTL_RC_SHIFT 12 +#define CPUCP_PKT_CTL_RC_MASK 0x0000F000 + +#define CPUCP_PKT_CTL_OPCODE_SHIFT 16 +#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000 + +#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0 +#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull +#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16 +#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull +#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32 +#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull +#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48 +#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull + 
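(Editor's note: a minimal, hypothetical sketch, not part of the patch, showing how the ctl and result bit layouts above are meant to be used for a CPUCP_PACKET_PLL_INFO_GET request. Only macros and struct fields defined in this header are assumed; cpucp_send_and_wait() is an invented stand-in for the driver's transport, which per the comment block above writes the packet to device DDR, triggers interrupt 121, and polls the fence field.)

/* Hypothetical helper, for illustration only. */
static int example_get_pll_freqs_mhz(u16 pll_type, u16 freq_mhz[4])
{
	struct cpucp_packet pkt = {};
	u64 result;
	int rc;

	/* The opcode is packed into ctl at CPUCP_PKT_CTL_OPCODE_SHIFT. */
	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
			      CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.pll_type = cpu_to_le16(pll_type);

	rc = cpucp_send_and_wait(&pkt, &result); /* invented transport */
	if (rc)
		return rc;

	/* The 64-bit result packs four 16-bit frequencies in MHz. */
	freq_mhz[0] = (result & CPUCP_PKT_RES_PLL_OUT0_MASK) >> CPUCP_PKT_RES_PLL_OUT0_SHIFT;
	freq_mhz[1] = (result & CPUCP_PKT_RES_PLL_OUT1_MASK) >> CPUCP_PKT_RES_PLL_OUT1_SHIFT;
	freq_mhz[2] = (result & CPUCP_PKT_RES_PLL_OUT2_MASK) >> CPUCP_PKT_RES_PLL_OUT2_SHIFT;
	freq_mhz[3] = (result & CPUCP_PKT_RES_PLL_OUT3_MASK) >> CPUCP_PKT_RES_PLL_OUT3_SHIFT;
	return 0;
}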
+#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull
+
+#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_PFC_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_LPBK_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_LPBK_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
+/* heartbeat status bits */
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
+
+struct cpucp_packet {
+ union {
+ __le64 value; /* For SET packets */
+ __le64 result; /* For GET packets */
+ __le64 addr; /* For PQ */
+ };
+
+ __le32 ctl;
+
+ __le32 fence; /* Signal to host that message is completed */
+
+ union {
+ struct {/* For temperature/current/voltage/fan/pwm get/set */
+ __le16 sensor_index;
+ __le16 type;
+ };
+
+ struct { /* For I2C read/write */
+ __u8 i2c_bus;
+ __u8 i2c_addr;
+ __u8 i2c_reg;
+ /*
+ * In legacy implementations, i2c_len was not present,
+ * was unused and just added as pad.
+ * So if i2c_len is 0, it is treated as legacy
+ * and r/w 1 byte; else, if i2c_len is specified,
+ * it's treated as new multi-byte r/w support.
+ */
+ __u8 i2c_len;
+ };
+
+ struct {/* For PLL info fetch */
+ __le16 pll_type;
+ /* TODO pll_reg is kept temporary before removal */
+ __le16 pll_reg;
+ };
+
+ /* For any general request */
+ __le32 index;
+
+ /* For frequency get/set */
+ __le32 pll_index;
+
+ /* For led set */
+ __le32 led_index;
+
+ /* For get CpuCP info/EEPROM data/NIC info */
+ __le32 data_max_size;
+
+ /*
+ * For any general status bitmask. Shall be used whenever the
+ * result cannot be used to hold general purpose data.
+ */
+ __le32 status_mask;
+
+ /* random, used once number, for security packets */
+ __le32 nonce;
+ };
+
+ union {
+ /* For NIC requests */
+ __le32 port_index;
+
+ /* For Generic packet sub index */
+ __le32 pkt_subidx;
+ };
+};
+
+struct cpucp_unmask_irq_arr_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 irqs[];
+};
+
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+struct cpucp_array_data_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+enum cpucp_led_index {
+ CPUCP_LED0_INDEX = 0,
+ CPUCP_LED1_INDEX,
+ CPUCP_LED2_INDEX,
+ CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX
+};
+
+/*
+ * enum cpucp_packet_rc - Error return code
+ * @cpucp_packet_success -> in case of success.
+ * @cpucp_packet_invalid -> this is to support first generation platforms.
+ * @cpucp_packet_fault -> in case of processing error like failing to
+ * get device binding or semaphore etc.
+ * @cpucp_packet_invalid_pkt -> when cpucp packet is unsupported.
+ * @cpucp_packet_invalid_params -> when a parameter check, like length of buffer
+ * or attribute value, fails.
+ * @cpucp_packet_rc_max -> indicates the size of the enum, so it should be last.
+ */
+enum cpucp_packet_rc {
+ cpucp_packet_success,
+ cpucp_packet_invalid,
+ cpucp_packet_fault,
+ cpucp_packet_invalid_pkt,
+ cpucp_packet_invalid_params,
+ cpucp_packet_rc_max
+};
+
+/*
+ * cpucp_temp_type should adhere to hwmon_temp_attributes
+ * defined in the Linux kernel hwmon.h file
+ */
+enum cpucp_temp_type {
+ cpucp_temp_input,
+ cpucp_temp_min = 4,
+ cpucp_temp_min_hyst,
+ cpucp_temp_max = 6,
+ cpucp_temp_max_hyst,
+ cpucp_temp_crit,
+ cpucp_temp_crit_hyst,
+ cpucp_temp_offset = 19,
+ cpucp_temp_lowest = 21,
+ cpucp_temp_highest = 22,
+ cpucp_temp_reset_history = 23,
+ cpucp_temp_warn = 24,
+ cpucp_temp_max_crit = 25,
+ cpucp_temp_max_warn = 26,
+};
+
+enum cpucp_in_attributes {
+ cpucp_in_input,
+ cpucp_in_min,
+ cpucp_in_max,
+ cpucp_in_lowest = 6,
+ cpucp_in_highest = 7,
+ cpucp_in_reset_history,
+ cpucp_in_intr_alarm_a,
+ cpucp_in_intr_alarm_b,
+};
+
+enum cpucp_curr_attributes {
+ cpucp_curr_input,
+ cpucp_curr_min,
+ cpucp_curr_max,
+ cpucp_curr_lowest = 6,
+ cpucp_curr_highest = 7,
+ cpucp_curr_reset_history
+};
+
+enum cpucp_fan_attributes {
+ cpucp_fan_input,
+ cpucp_fan_min = 2,
+ cpucp_fan_max
+};
+
+enum cpucp_pwm_attributes {
+ cpucp_pwm_input,
+ cpucp_pwm_enable
+};
+
+enum cpucp_pcie_throughput_attributes {
+ cpucp_pcie_throughput_tx,
+ cpucp_pcie_throughput_rx
+};
+
+/* TODO temporary kept before removal */
+enum cpucp_pll_reg_attributes {
+ cpucp_pll_nr_reg,
+ cpucp_pll_nf_reg,
+ cpucp_pll_od_reg,
+ cpucp_pll_div_factor_reg,
+ cpucp_pll_div_sel_reg
+};
+
+/* TODO temporary kept before removal */
+enum cpucp_pll_type_attributes {
+ cpucp_pll_cpu,
+ cpucp_pll_pci,
+};
+
+/*
+ * cpucp_power_type aligns with hwmon_power_attributes
+ * defined in the Linux kernel hwmon.h file
+ */
+enum cpucp_power_type {
+ CPUCP_POWER_INPUT = 8,
+ CPUCP_POWER_INPUT_HIGHEST = 9,
+ CPUCP_POWER_RESET_INPUT_HISTORY = 11
+};
+
+/*
+ * MSI type enumeration table for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum cpucp_msi_type {
+ CPUCP_EVENT_QUEUE_MSI_TYPE,
+ CPUCP_NIC_PORT1_MSI_TYPE,
+ CPUCP_NIC_PORT3_MSI_TYPE,
+ CPUCP_NIC_PORT5_MSI_TYPE,
+ CPUCP_NIC_PORT7_MSI_TYPE,
+ CPUCP_NIC_PORT9_MSI_TYPE,
+ CPUCP_EVENT_QUEUE_ERR_MSI_TYPE,
+ CPUCP_NUM_OF_MSI_TYPES
+};
+
+/*
+ * PLL enumeration table used for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table.
+ * Changing the order of entries or removing entries is not allowed.
+ */ +enum pll_index { + CPU_PLL = 0, + PCI_PLL = 1, + NIC_PLL = 2, + DMA_PLL = 3, + MESH_PLL = 4, + MME_PLL = 5, + TPC_PLL = 6, + IF_PLL = 7, + SRAM_PLL = 8, + NS_PLL = 9, + HBM_PLL = 10, + MSS_PLL = 11, + DDR_PLL = 12, + VID_PLL = 13, + BANK_PLL = 14, + MMU_PLL = 15, + IC_PLL = 16, + MC_PLL = 17, + EMMC_PLL = 18, + D2D_PLL = 19, + CS_PLL = 20, + C2C_PLL = 21, + NCH_PLL = 22, + C2M_PLL = 23, + PLL_MAX +}; + +enum rl_index { + TPC_RL = 0, + MME_RL, + EDMA_RL, +}; + +enum pvt_index { + PVT_SW, + PVT_SE, + PVT_NW, + PVT_NE +}; + +/* Event Queue Packets */ + +struct eq_generic_event { + __le64 data[7]; +}; + +/* + * CpuCP info + */ + +#define CARD_NAME_MAX_LEN 16 +#define CPUCP_MAX_SENSORS 128 +#define CPUCP_MAX_NICS 128 +#define CPUCP_LANES_PER_NIC 4 +#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024 +#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC) +#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64) +#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64) +#define CPUCP_HBM_ROW_REPLACE_MAX 32 + +struct cpucp_sensor { + __le32 type; + __le32 flags; +}; + +/** + * struct cpucp_card_types - ASIC card type. + * @cpucp_card_type_pci: PCI card. + * @cpucp_card_type_pmc: PCI Mezzanine Card. + */ +enum cpucp_card_types { + cpucp_card_type_pci, + cpucp_card_type_pmc +}; + +#define CPUCP_SEC_CONF_ENABLED_SHIFT 0 +#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001 + +#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1 +#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002 + +#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2 +#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004 + +/** + * struct cpucp_security_info - Security information. + * @config: configuration bit field + * @keys_num: number of stored keys + * @revoked_keys: revoked keys bit field + * @min_svn: minimal security version + */ +struct cpucp_security_info { + __u8 config; + __u8 keys_num; + __u8 revoked_keys; + __u8 min_svn; +}; + +/** + * struct cpucp_info - Info from CpuCP that is necessary to the host's driver + * @sensors: available sensors description. + * @kernel_version: CpuCP linux kernel version. + * @reserved: reserved field. + * @card_type: card configuration type. + * @card_location: in a server, each card has different connections topology + * depending on its location (relevant for PMC card type) + * @cpld_version: CPLD programmed F/W version. + * @infineon_version: Infineon main DC-DC version. + * @fuse_version: silicon production FUSE information. + * @thermal_version: thermald S/W version. + * @cpucp_version: CpuCP S/W version. + * @infineon_second_stage_version: Infineon 2nd stage DC-DC version. + * @dram_size: available DRAM size. + * @card_name: card name that will be displayed in HWMON subsystem on the host + * @tpc_binning_mask: TPC binning mask, 1 bit per TPC instance + * (0 = functional, 1 = binned) + * @decoder_binning_mask: Decoder binning mask, 1 bit per decoder instance + * (0 = functional, 1 = binned), maximum 1 per dcore + * @sram_binning: Categorize SRAM functionality + * (0 = fully functional, 1 = lower-half is not functional, + * 2 = upper-half is not functional) + * @sec_info: security information + * @pll_map: Bit map of supported PLLs for current ASIC version. + * @mme_binning_mask: MME binning mask, + * bits [0:6] <==> dcore0 mme fma + * bits [7:13] <==> dcore1 mme fma + * bits [14:20] <==> dcore0 mme ima + * bits [21:27] <==> dcore1 mme ima + * For each group, if the 6th bit is set then first 5 bits + * represent the col's idx [0-31], otherwise these bits are + * ignored, and col idx 32 is binned. 
7th bit is don't care. + * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance + * (0 = functional 1 = binned) + * @memory_repair_flag: eFuse flag indicating memory repair + * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance + * (0 = functional 1 = binned) + * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance + * (0 = functional 1 = binned) + * @interposer_version: Interposer version programmed in eFuse + * @substrate_version: Substrate version programmed in eFuse + * @eq_health_check_supported: eq health check feature supported in FW. + * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM. + * @fw_os_version: Firmware OS Version + */ +struct cpucp_info { + struct cpucp_sensor sensors[CPUCP_MAX_SENSORS]; + __u8 kernel_version[VERSION_MAX_LEN]; + __le32 reserved; + __le32 card_type; + __le32 card_location; + __le32 cpld_version; + __le32 infineon_version; + __u8 fuse_version[VERSION_MAX_LEN]; + __u8 thermal_version[VERSION_MAX_LEN]; + __u8 cpucp_version[VERSION_MAX_LEN]; + __le32 infineon_second_stage_version; + __le64 dram_size; + char card_name[CARD_NAME_MAX_LEN]; + __le64 tpc_binning_mask; + __le64 decoder_binning_mask; + __u8 sram_binning; + __u8 dram_binning_mask; + __u8 memory_repair_flag; + __u8 edma_binning_mask; + __u8 xbar_binning_mask; + __u8 interposer_version; + __u8 substrate_version; + __u8 eq_health_check_supported; + struct cpucp_security_info sec_info; + __le32 fw_hbm_region_size; + __u8 pll_map[PLL_MAP_LEN]; + __le64 mme_binning_mask; + __u8 fw_os_version[VERSION_MAX_LEN]; +}; + +struct cpucp_mac_addr { + __u8 mac_addr[ETH_ALEN]; +}; + +enum cpucp_serdes_type { + TYPE_1_SERDES_TYPE, + TYPE_2_SERDES_TYPE, + HLS1_SERDES_TYPE, + HLS1H_SERDES_TYPE, + HLS2_SERDES_TYPE, + HLS2_TYPE_1_SERDES_TYPE, + MAX_NUM_SERDES_TYPE, /* number of types */ + UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */ +}; + +struct cpucp_nic_info { + struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS]; + __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN]; + __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN]; + __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN]; + __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN]; + __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN]; + __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN]; + __le16 serdes_type; /* enum cpucp_serdes_type */ + __le16 tx_swap_map[CPUCP_MAX_NICS]; + __u8 reserved[6]; +}; + +#define PAGE_DISCARD_MAX 64 + +struct page_discard_info { + __u8 num_entries; + __u8 reserved[7]; + __le32 mmu_page_idx[PAGE_DISCARD_MAX]; +}; + +/* + * struct frac_val - fracture value represented by "integer.frac". + * @integer: the integer part of the fracture value; + * @frac: the fracture part of the fracture value. + */ +struct frac_val { + union { + struct { + __le16 integer; + __le16 frac; + }; + __le32 val; + }; +}; + +/* + * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp". + * @integer: the integer part of the SER value; + * @exp: the exponent part of the SER value. + */ +struct ser_val { + __le16 integer; + __le16 exp; +}; + +/* + * struct cpucp_nic_status - describes the status of a NIC port. + * @port: NIC port index. + * @bad_format_cnt: e.g. CRC. + * @responder_out_of_sequence_psn_cnt: e.g NAK. + * @high_ber_reinit_cnt: link reinit due to high BER. + * @correctable_err_cnt: e.g. bit-flip. + * @uncorrectable_err_cnt: e.g. MAC errors. + * @retraining_cnt: re-training counter. + * @up: is port up. + * @pcs_link: has PCS link. + * @phy_ready: is PHY ready. + * @auto_neg: is Autoneg enabled. 
+ * @timeout_retransmission_cnt: timeout retransmission events. + * @high_ber_cnt: high ber events. + * @pre_fec_ser: pre FEC SER value. + * @post_fec_ser: post FEC SER value. + * @throughput: measured throughput. + * @latency: measured latency. + */ +struct cpucp_nic_status { + __le32 port; + __le32 bad_format_cnt; + __le32 responder_out_of_sequence_psn_cnt; + __le32 high_ber_reinit; + __le32 correctable_err_cnt; + __le32 uncorrectable_err_cnt; + __le32 retraining_cnt; + __u8 up; + __u8 pcs_link; + __u8 phy_ready; + __u8 auto_neg; + __le32 timeout_retransmission_cnt; + __le32 high_ber_cnt; + struct ser_val pre_fec_ser; + struct ser_val post_fec_ser; + struct frac_val bandwidth; + struct frac_val lat; +}; + +enum cpucp_hbm_row_replace_cause { + REPLACE_CAUSE_DOUBLE_ECC_ERR, + REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR, +}; + +struct cpucp_hbm_row_info { + __u8 hbm_idx; + __u8 pc; + __u8 sid; + __u8 bank_idx; + __le16 row_addr; + __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */ + __u8 pad; +}; + +struct cpucp_hbm_row_replaced_rows_info { + __le16 num_replaced_rows; + __u8 pad[6]; + struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX]; +}; + +enum cpu_reset_status { + CPU_RST_STATUS_NA = 0, + CPU_RST_STATUS_SOFT_RST_DONE = 1, +}; + +#define SEC_PCR_DATA_BUF_SZ 256 +#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */ +#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */ +#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */ +#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */ + +/* + * struct cpucp_sec_attest_info - attestation report of the boot + * @pcr_data: raw values of the PCR registers + * @pcr_num_reg: number of PCR registers in the pcr_data array + * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes) + * @nonce: number only used once. random number provided by host. this also + * passed to the quote command as a qualifying data. + * @pcr_quote_len: length of the attestation quote data (bytes) + * @pcr_quote: attestation report data structure + * @quote_sig_len: length of the attestation report signature (bytes) + * @quote_sig: signature structure of the attestation report + * @pub_data_len: length of the public data (bytes) + * @public_data: public key for the signed attestation + * (outPublic + name + qualifiedName) + * @certificate_len: length of the certificate (bytes) + * @certificate: certificate for the attestation signing key + */ +struct cpucp_sec_attest_info { + __u8 pcr_data[SEC_PCR_DATA_BUF_SZ]; + __u8 pcr_num_reg; + __u8 pcr_reg_len; + __le16 pad0; + __le32 nonce; + __le16 pcr_quote_len; + __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ]; + __u8 quote_sig_len; + __u8 quote_sig[SEC_SIGNATURE_BUF_SZ]; + __le16 pub_data_len; + __u8 public_data[SEC_PUB_DATA_BUF_SZ]; + __le16 certificate_len; + __u8 certificate[SEC_CERTIFICATE_BUF_SZ]; +}; + +/* + * struct cpucp_dev_info_signed - device information signed by a secured device + * @info: device information structure as defined above + * @nonce: number only used once. random number provided by host. this number is + * hashed and signed along with the device information. + * @info_sig_len: length of the attestation signature (bytes) + * @info_sig: signature of the info + nonce data. 
+ * @pub_data_len: length of the public data (bytes) + * @public_data: public key info signed info data + * (outPublic + name + qualifiedName) + * @certificate_len: length of the certificate (bytes) + * @certificate: certificate for the signing key + */ +struct cpucp_dev_info_signed { + struct cpucp_info info; /* assumed to be 64bit aligned */ + __le32 nonce; + __le32 pad0; + __u8 info_sig_len; + __u8 info_sig[SEC_SIGNATURE_BUF_SZ]; + __le16 pub_data_len; + __u8 public_data[SEC_PUB_DATA_BUF_SZ]; + __le16 certificate_len; + __u8 certificate[SEC_CERTIFICATE_BUF_SZ]; +}; + +#define DCORE_MON_REGS_SZ 512 +/* + * struct dcore_monitor_regs_data - DCORE monitor regs data. + * the structure follows sync manager block layout. Obsolete. + * @mon_pay_addrl: array of payload address low bits. + * @mon_pay_addrh: array of payload address high bits. + * @mon_pay_data: array of payload data. + * @mon_arm: array of monitor arm. + * @mon_status: array of monitor status. + */ +struct dcore_monitor_regs_data { + __le32 mon_pay_addrl[DCORE_MON_REGS_SZ]; + __le32 mon_pay_addrh[DCORE_MON_REGS_SZ]; + __le32 mon_pay_data[DCORE_MON_REGS_SZ]; + __le32 mon_arm[DCORE_MON_REGS_SZ]; + __le32 mon_status[DCORE_MON_REGS_SZ]; +}; + +/* contains SM data for each SYNC_MNGR (Obsolete) */ +struct cpucp_monitor_dump { + struct dcore_monitor_regs_data sync_mngr_w_s; + struct dcore_monitor_regs_data sync_mngr_e_s; + struct dcore_monitor_regs_data sync_mngr_w_n; + struct dcore_monitor_regs_data sync_mngr_e_n; +}; + +/* + * The Type of the generic request (and other input arguments) will be fetched from user by reading + * from "pkt_subidx" field in struct cpucp_packet. + * + * HL_PASSTHROUGHT_VERSIONS - Fetch all firmware versions. + */ +enum hl_passthrough_type { + HL_PASSTHROUGH_VERSIONS, +}; + +#endif /* CPUCP_IF_H */ diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h new file mode 100644 index 000000000000..93366d5621fd --- /dev/null +++ b/include/linux/habanalabs/hl_boot_if.h @@ -0,0 +1,792 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2018-2020 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef HL_BOOT_IF_H +#define HL_BOOT_IF_H + +#define LKD_HARD_RESET_MAGIC 0xED7BD694 /* deprecated - do not use */ +#define HL_POWER9_HOST_MAGIC 0x1DA30009 + +#define BOOT_FIT_SRAM_OFFSET 0x200000 + +#define VERSION_MAX_LEN 128 + +enum cpu_boot_err { + CPU_BOOT_ERR_DRAM_INIT_FAIL = 0, + CPU_BOOT_ERR_FIT_CORRUPTED = 1, + CPU_BOOT_ERR_TS_INIT_FAIL = 2, + CPU_BOOT_ERR_DRAM_SKIPPED = 3, + CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4, + CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5, + CPU_BOOT_ERR_NIC_FW_FAIL = 6, + CPU_BOOT_ERR_SECURITY_NOT_RDY = 7, + CPU_BOOT_ERR_SECURITY_FAIL = 8, + CPU_BOOT_ERR_EFUSE_FAIL = 9, + CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10, + CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11, + CPU_BOOT_ERR_PLL_FAIL = 12, + CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13, + CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18, + CPU_BOOT_ERR_BINNING_FAIL = 19, + CPU_BOOT_ERR_TPM_FAIL = 20, + CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21, + CPU_BOOT_ERR_EEPROM_FAIL = 22, + CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23, + CPU_BOOT_ERR_ENABLED = 31, + CPU_BOOT_ERR_SCND_EN = 63, + CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */ +}; + +/* + * Mask for fatal failures + * This mask contains all possible fatal failures, and a dynamic code + * will clear the non-relevant ones. 
+ */
+#define CPU_BOOT_ERR_FATAL_MASK \
+ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
+ (1 << CPU_BOOT_ERR_PLL_FAIL) | \
+ (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \
+ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \
+ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
+ (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \
+ (1 << CPU_BOOT_ERR_EEPROM_FAIL))
+
+/*
+ * CPU error bits in BOOT_ERROR registers
+ *
+ * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
+ * DRAM is not reliable to use.
+ *
+ * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the
+ * image provided by the host has failed.
+ *
+ * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed.
+ * Boot continues as usual, but keep in
+ * mind this is a warning.
+ *
+ * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped.
+ * Skipping DRAM initialization has been
+ * requested (e.g. strap, command, etc.)
+ * and FW skipped the DRAM initialization.
+ * Host can initialize the DRAM.
+ *
+ * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped.
+ * Meaning the BMC data might not be
+ * available until reset.
+ *
+ * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready.
+ * BMC has not provided the NIC data yet.
+ * Once provided this bit will be cleared.
+ *
+ * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed.
+ * The NIC FW loading and initialization
+ * failed. This means NICs are not usable.
+ *
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust), data packets authentication.
+ *
+ * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed.
+ * The PCI device ID might be wrong.
+ *
+ * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed.
+ * It means that the ppboot checksum
+ * verification for the preboot primary
+ * image has failed to match the expected
+ * checksum. Trying to program the image
+ * again might solve this.
+ *
+ * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed.
+ * It means that the ppboot checksum
+ * verification for the preboot secondary
+ * image has failed to match the expected
+ * checksum. Trying to program the image
+ * again might solve this.
+ *
+ * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
+ * of the PLLs remains in REF_CLK.
+ *
+ * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
+ * should be contacted.
+ *
+ * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during
+ * the execution of ppboot or preboot.
+ * For example: stack overflow.
+ *
+ * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning
+ * malfunctioning components might still be
+ * in use.
+ *
+ * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed.
+ *
+ * CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL Failed to set threshold for temperature
+ * sensor.
+ *
+ * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults
+ * are used.
+ *
+ * CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm
+ * memories. Boot disabled until reset.
+ *
+ * CPU_BOOT_ERR0_ENABLED Error registers enabled.
+ * This is a main indication that the
+ * running FW populates the error
+ * registers. Meaning the error bits are
+ * not garbage, but actual error statuses.
+ */ +#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) +#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED) +#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL) +#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED) +#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED) +#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY) +#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL) +#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY) +#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL) +#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL) +#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL) +#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL) +#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL) +#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) +#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR) +#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL) +#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL) +#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL) +#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL) +#define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) +#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED) +#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED) + +enum cpu_boot_dev_sts { + CPU_BOOT_DEV_STS_SECURITY_EN = 0, + CPU_BOOT_DEV_STS_DEBUG_EN = 1, + CPU_BOOT_DEV_STS_WATCHDOG_EN = 2, + CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3, + CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4, + CPU_BOOT_DEV_STS_E2E_CRED_EN = 5, + CPU_BOOT_DEV_STS_HBM_CRED_EN = 6, + CPU_BOOT_DEV_STS_RL_EN = 7, + CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8, + CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9, + CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10, + CPU_BOOT_DEV_STS_PLL_INFO_EN = 11, + CPU_BOOT_DEV_STS_SP_SRAM_EN = 12, + CPU_BOOT_DEV_STS_CLK_GATE_EN = 13, + CPU_BOOT_DEV_STS_HBM_ECC_EN = 14, + CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15, + CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16, + CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17, + CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18, + CPU_BOOT_DEV_STS_DYN_PLL_EN = 19, + CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20, + CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21, + CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22, + CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23, + CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24, + CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25, + CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26, + CPU_BOOT_DEV_STS_ENABLED = 31, + CPU_BOOT_DEV_STS_SCND_EN = 63, + CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */ +}; + +/* + * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers + * + * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled. + * This is an indication for security + * enabled in FW, which means that + * all conditions for security are met: + * device is indicated as security enabled, + * registers are protected, and device + * uses keys for image verification. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled. + * Enabled when JTAG or DEBUG is enabled + * in FW. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled. + * Watchdog is enabled in FW. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled. + * DRAM initialization has been done in FW. 
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_BMC_WAIT_EN	Waiting for BMC data enabled.
+ *					If set, it means that during boot,
+ *					FW waited for BMC data.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_E2E_CRED_EN	E2E credits initialized.
+ *					FW initialized E2E credits.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_CRED_EN	HBM credits initialized.
+ *					FW initialized HBM credits.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_RL_EN		Rate limiter initialized.
+ *					FW initialized rate limiter.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_SRAM_SCR_EN	SRAM scrambler enabled.
+ *					FW initialized SRAM scrambler.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_SCR_EN	DRAM scrambler enabled.
+ *					FW initialized DRAM scrambler.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN	FW hard reset procedure is enabled.
+ *					FW has the hard reset procedure
+ *					implemented. This means that FW will
+ *					perform the hard reset procedure on
+ *					receiving the halt-machine event.
+ *					Initialized in: preboot, u-boot, linux
+ *
+ * CPU_BOOT_DEV_STS0_PLL_INFO_EN	FW retrieval of PLL info is enabled.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN		SP SRAM is initialized and available
+ *					for use.
+ *					Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN	Clock Gating enabled.
+ *					FW initialized Clock Gating.
+ *					Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_ECC_EN		HBM ECC handling enabled.
+ *					FW handles HBM ECC indications.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN	Packets ack value used in the armcpd
+ *					is set to the PI counter.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_LD_COM_EN	Flexible FW loading communication
+ *					protocol is enabled.
+ *					Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN	FW iATU configuration is enabled.
+ *					If this bit is set, the iATU has been
+ *					configured and is ready for use.
+ *					Initialized in: ppboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN	NIC MAC channels init is done by FW and
+ *					any access to them is done via the FW.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DYN_PLL_EN		Dynamic PLL configuration is enabled.
+ *					FW sends the host a bitmap of supported
+ *					PLLs.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN	GIC access is permitted only to a
+ *					privileged entity. FW sets this status
+ *					bit for the host. If this bit is set,
+ *					the GIC cannot be accessed from the
+ *					host.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_EQ_INDEX_EN	Event Queue (EQ) index is a running
+ *					index for each new event sent to the
+ *					host. The host uses it to identify
+ *					whether a waiting event in the queue
+ *					is actually a new event that has not
+ *					been served before.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN	Use multiple scratchpad interfaces to
+ *					prevent IRQs from overriding each other.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ *					NIC STAT and XPCS91 access is restricted
+ *					and is done via FW only.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ *					NIC STAT get all is supported.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ *					F/W checks if the device is idle by
+ *					reading a defined set of registers. It
+ *					returns a bitmask of all the engines,
+ *					where a bit is set if the engine is
+ *					not idle.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MAP_HWMON_EN
+ *					If set, the f/w supports proprietary
+ *					HWMON enum mapping to cpucp enums.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_ENABLED		Device status register enabled.
+ *					This is the main indication that the
+ *					running FW populates the device status
+ *					register, meaning the device status
+ *					bits are not garbage but actual
+ *					statuses.
+ *					Initialized in: preboot
+ *
+ */
+#define CPU_BOOT_DEV_STS0_SECURITY_EN		(1 << CPU_BOOT_DEV_STS_SECURITY_EN)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN		(1 << CPU_BOOT_DEV_STS_DEBUG_EN)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN		(1 << CPU_BOOT_DEV_STS_WATCHDOG_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN		(1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN		(1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN		(1 << CPU_BOOT_DEV_STS_E2E_CRED_EN)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN		(1 << CPU_BOOT_DEV_STS_HBM_CRED_EN)
+#define CPU_BOOT_DEV_STS0_RL_EN			(1 << CPU_BOOT_DEV_STS_RL_EN)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN		(1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN		(1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN	(1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN		(1 << CPU_BOOT_DEV_STS_PLL_INFO_EN)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN		(1 << CPU_BOOT_DEV_STS_SP_SRAM_EN)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN		(1 << CPU_BOOT_DEV_STS_CLK_GATE_EN)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN		(1 << CPU_BOOT_DEV_STS_HBM_ECC_EN)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN		(1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN		(1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN	(1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN		(1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN		(1 << CPU_BOOT_DEV_STS_DYN_PLL_EN)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN	(1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN		(1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN	(1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN	(1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN	(1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN	(1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
+#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN		(1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_ENABLED		(1 << CPU_BOOT_DEV_STS_ENABLED)
+#define CPU_BOOT_DEV_STS1_ENABLED		(1 << CPU_BOOT_DEV_STS_ENABLED)
+
+enum cpu_boot_status {
+	CPU_BOOT_STATUS_NA = 0,		/* Default value after reset of chip */
+	CPU_BOOT_STATUS_IN_WFE = 1,
+	CPU_BOOT_STATUS_DRAM_RDY = 2,
+	CPU_BOOT_STATUS_SRAM_AVAIL = 3,
+	CPU_BOOT_STATUS_IN_BTL = 4,	/* BTL is H/W FSM */
+	CPU_BOOT_STATUS_IN_PREBOOT = 5,
+	CPU_BOOT_STATUS_IN_SPL,		/* deprecated - not reported */
+	CPU_BOOT_STATUS_IN_UBOOT = 7,
+	CPU_BOOT_STATUS_DRAM_INIT_FAIL,	/* deprecated - will be removed */
+	CPU_BOOT_STATUS_FIT_CORRUPTED,	/* deprecated - will be removed */
+	/* U-Boot console prompt activated, commands are not processed */
+	CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+	/* Finished NICs init, reported after DRAM and NICs */
+	CPU_BOOT_STATUS_NIC_FW_RDY = 11,
+	CPU_BOOT_STATUS_TS_INIT_FAIL,	/* deprecated - will be removed */
+	CPU_BOOT_STATUS_DRAM_SKIPPED,	/* deprecated - will be removed */
+	CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+	/* Last boot loader progress status, ready to receive commands */
+	CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+	/*
Internal Boot finished, ready for boot-fit */ + CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16, + /* Internal Security has been initialized, device can be accessed */ + CPU_BOOT_STATUS_SECURITY_READY = 17, + /* FW component is preparing to shutdown and communication with host is not available */ + CPU_BOOT_STATUS_FW_SHUTDOWN_PREP = 18, +}; + +enum kmd_msg { + KMD_MSG_NA = 0, + KMD_MSG_GOTO_WFE, + KMD_MSG_FIT_RDY, + KMD_MSG_SKIP_BMC, + RESERVED, + KMD_MSG_RST_DEV, + KMD_MSG_LAST +}; + +enum cpu_msg_status { + CPU_MSG_CLR = 0, + CPU_MSG_OK, + CPU_MSG_ERR, +}; + +/* communication registers mapping - consider ABI when changing */ +struct cpu_dyn_regs { + __le32 cpu_pq_base_addr_low; + __le32 cpu_pq_base_addr_high; + __le32 cpu_pq_length; + __le32 cpu_pq_init_status; + __le32 cpu_eq_base_addr_low; + __le32 cpu_eq_base_addr_high; + __le32 cpu_eq_length; + __le32 cpu_eq_ci; + __le32 cpu_cq_base_addr_low; + __le32 cpu_cq_base_addr_high; + __le32 cpu_cq_length; + __le32 cpu_pf_pq_pi; + __le32 cpu_boot_dev_sts0; + __le32 cpu_boot_dev_sts1; + __le32 cpu_boot_err0; + __le32 cpu_boot_err1; + __le32 cpu_boot_status; + __le32 fw_upd_sts; + __le32 fw_upd_cmd; + __le32 fw_upd_pending_sts; + __le32 fuse_ver_offset; + __le32 preboot_ver_offset; + __le32 uboot_ver_offset; + __le32 hw_state; + __le32 kmd_msg_to_cpu; + __le32 cpu_cmd_status_to_host; + __le32 gic_host_pi_upd_irq; + __le32 gic_tpc_qm_irq_ctrl; + __le32 gic_mme_qm_irq_ctrl; + __le32 gic_dma_qm_irq_ctrl; + __le32 gic_nic_qm_irq_ctrl; + __le32 gic_dma_core_irq_ctrl; + __le32 gic_host_halt_irq; + __le32 gic_host_ints_irq; + __le32 gic_host_soft_rst_irq; + __le32 gic_rot_qm_irq_ctrl; + __le32 cpu_rst_status; + __le32 eng_arc_irq_ctrl; + __le32 reserved1[20]; /* reserve for future use */ +}; + +/* TODO: remove the desc magic after the code is updated to use message */ +/* HCDM - Habana Communications Descriptor Magic */ +#define HL_COMMS_DESC_MAGIC 0x4843444D +#define HL_COMMS_DESC_VER 3 + +/* HCMv - Habana Communications Message + header version */ +#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00 +#define HL_COMMS_MSG_MAGIC_MASK 0xFFFFFF00 +#define HL_COMMS_MSG_MAGIC_VER_MASK 0xFF + +#define HL_COMMS_MSG_MAGIC_VER(ver) (HL_COMMS_MSG_MAGIC_VALUE | \ + ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK)) +#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC +#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1) +#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2) +#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3) + +#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3 + +#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \ + (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \ + HL_COMMS_MSG_MAGIC_VALUE) + +#define HL_COMMS_MSG_MAGIC_VALIDATE_VERSION(magic, ver) \ + (((magic) & HL_COMMS_MSG_MAGIC_VER_MASK) >= \ + ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK)) + +#define HL_COMMS_MSG_MAGIC_VALIDATE(magic, ver) \ + (HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC((magic)) && \ + HL_COMMS_MSG_MAGIC_VALIDATE_VERSION((magic), (ver))) + +enum comms_msg_type { + HL_COMMS_DESC_TYPE = 0, + HL_COMMS_RESET_CAUSE_TYPE = 1, + HL_COMMS_FW_CFG_SKIP_TYPE = 2, + HL_COMMS_BINNING_CONF_TYPE = 3, +}; + +/* + * Binning information shared between LKD and FW + * @tpc_mask_l - TPC binning information lower 64 bit + * @dec_mask - Decoder binning information + * @dram_mask - DRAM binning information + * @edma_mask - EDMA binning information + * @mme_mask_l - MME binning information lower 32 + * @mme_mask_h - MME binning information upper 32 + * @rot_mask - Rotator binning information + * @xbar_mask - xBAR binning information + * 
@reserved - reserved field for future binning info w/o ABI change
+ * @tpc_mask_h - TPC binning information upper 64 bit
+ * @nic_mask - NIC binning information
+ */
+struct lkd_fw_binning_info {
+	__le64 tpc_mask_l;
+	__le32 dec_mask;
+	__le32 dram_mask;
+	__le32 edma_mask;
+	__le32 mme_mask_l;
+	__le32 mme_mask_h;
+	__le32 rot_mask;
+	__le32 xbar_mask;
+	__le32 reserved0;
+	__le64 tpc_mask_h;
+	__le64 nic_mask;
+	__le32 reserved1[8];
+};
+
+/* TODO: remove this struct after the code is updated to use message */
+/* this is the comms descriptor header - metadata */
+struct comms_desc_header {
+	__le32 magic;		/* magic for validation */
+	__le32 crc32;		/* CRC32 of the descriptor w/o header */
+	__le16 size;		/* size of the descriptor w/o header */
+	__u8 version;		/* descriptor version */
+	__u8 reserved[5];	/* pad to 64 bit */
+};
+
+/* this is the comms message header - metadata */
+struct comms_msg_header {
+	__le32 magic;		/* magic for validation */
+	__le32 crc32;		/* CRC32 of the message w/o header */
+	__le16 size;		/* size of the message w/o header */
+	__u8 version;		/* message payload version */
+	__u8 type;		/* message type */
+	__u8 reserved[4];	/* pad to 64 bit */
+};
+
+enum lkd_fw_ascii_msg_lvls {
+	LKD_FW_ASCII_MSG_ERR = 0,
+	LKD_FW_ASCII_MSG_WRN = 1,
+	LKD_FW_ASCII_MSG_INF = 2,
+	LKD_FW_ASCII_MSG_DBG = 3,
+};
+
+#define LKD_FW_ASCII_MSG_MAX_LEN	128
+#define LKD_FW_ASCII_MSG_MAX		4	/* consider ABI when changing */
+
+struct lkd_fw_ascii_msg {
+	__u8 valid;
+	__u8 msg_lvl;
+	__u8 reserved[6];
+	char msg[LKD_FW_ASCII_MSG_MAX_LEN];
+};
+
+/* this is the main FW descriptor - consider ABI when changing */
+struct lkd_fw_comms_desc {
+	struct comms_desc_header header;
+	struct cpu_dyn_regs cpu_dyn_regs;
+	char fuse_ver[VERSION_MAX_LEN];
+	char cur_fw_ver[VERSION_MAX_LEN];
+	/* can be used for 1 more version w/o ABI change */
+	char reserved0[VERSION_MAX_LEN];
+	__le64 img_addr;	/* address for next FW component load */
+	struct lkd_fw_binning_info binning_info;
+	struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+	__le32 rsvd_mem_size_mb;	/* reserved memory size [MB] for FW/SVE */
+	char reserved1[4];
+};
+
+enum comms_reset_cause {
+	HL_RESET_CAUSE_UNKNOWN = 0,
+	HL_RESET_CAUSE_HEARTBEAT = 1,
+	HL_RESET_CAUSE_TDR = 2,
+};
+
+/* TODO: remove define after struct name is aligned on all projects */
+#define lkd_msg_comms lkd_fw_comms_msg
+
+/* this is the comms message descriptor */
+struct lkd_fw_comms_msg {
+	struct comms_msg_header header;
+	/* union for future expansions of new messages */
+	union {
+		struct {
+			struct cpu_dyn_regs cpu_dyn_regs;
+			char fuse_ver[VERSION_MAX_LEN];
+			char cur_fw_ver[VERSION_MAX_LEN];
+			/* can be used for 1 more version w/o ABI change */
+			char reserved0[VERSION_MAX_LEN];
+			/* address for next FW component load */
+			__le64 img_addr;
+			struct lkd_fw_binning_info binning_info;
+			struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+			/* reserved memory size [MB] for FW/SVE */
+			__le32 rsvd_mem_size_mb;
+			char reserved1[4];
+		};
+		struct {
+			__u8 reset_cause;
+		};
+		struct {
+			__u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
+		};
+		struct lkd_fw_binning_info binning_conf;
+	};
+};
+
+/*
+ * LKD commands:
+ *
+ * COMMS_NOOP			Used to clear the command register; no actual
+ *				command is sent.
+ *
+ * COMMS_CLR_STS		Clear status command - FW should clear the
+ *				status register. Used for synchronization
+ *				between the commands as part of the race-free
+ *				protocol.
+ *
+ * COMMS_RST_STATE		Reset the current communication state, which is
+ *				kept by FW for proper responses.
+ *				Should be used at the beginning of the
+ *				communication cycle to clean any leftovers from
+ *				previous communication attempts.
+ *
+ * COMMS_PREP_DESC		Prepare descriptor for setting up the
+ *				communication and other dynamic data:
+ *				struct lkd_fw_comms_desc.
+ *				This command has a parameter stating the next FW
+ *				component size, so the FW can actually prepare
+ *				space for it and provide the descriptor offset
+ *				in the status response. The offset of the next
+ *				FW data component is part of the descriptor
+ *				structure.
+ *
+ * COMMS_DATA_RDY		The FW data has been uploaded and is ready for
+ *				validation.
+ *
+ * COMMS_EXEC			Execute the next FW component.
+ *
+ * COMMS_RST_DEV		Reset the device.
+ *
+ * COMMS_GOTO_WFE		Execute WFE command. Allowed only on non-secure
+ *				devices.
+ *
+ * COMMS_SKIP_BMC		Perform actions required for BMC-less servers.
+ *				Do not wait for BMC response.
+ *
+ * COMMS_PREP_DESC_ELBI		Same as COMMS_PREP_DESC, only that the memory
+ *				space is allocated in an ELBI-access-only
+ *				address range.
+ *
+ */
+enum comms_cmd {
+	COMMS_NOOP = 0,
+	COMMS_CLR_STS = 1,
+	COMMS_RST_STATE = 2,
+	COMMS_PREP_DESC = 3,
+	COMMS_DATA_RDY = 4,
+	COMMS_EXEC = 5,
+	COMMS_RST_DEV = 6,
+	COMMS_GOTO_WFE = 7,
+	COMMS_SKIP_BMC = 8,
+	COMMS_PREP_DESC_ELBI = 10,
+	COMMS_INVLD_LAST
+};
+
+#define COMMS_COMMAND_SIZE_SHIFT	0
+#define COMMS_COMMAND_SIZE_MASK		0x1FFFFFF
+#define COMMS_COMMAND_CMD_SHIFT		27
+#define COMMS_COMMAND_CMD_MASK		0xF8000000
+
+/*
+ * LKD command to FW register structure
+ * @size - FW component size
+ * @cmd - command from enum comms_cmd
+ */
+struct comms_command {
+	union {		/* bit fields are only for FW use */
+		struct {
+			u32 size :25;		/* 32MB max. */
+			u32 reserved :2;
+			enum comms_cmd cmd :5;	/* 32 commands */
+		};
+		__le32 val;
+	};
+};
+
+/*
+ * FW status
+ *
+ * COMMS_STS_NOOP		Used to clear the status register; no actual
+ *				status is provided.
+ *
+ * COMMS_STS_ACK		Command has been received and recognized.
+ *
+ * COMMS_STS_OK			Command execution has finished successfully.
+ *
+ * COMMS_STS_ERR		Command execution was unsuccessful and resulted
+ *				in an error.
+ *
+ * COMMS_STS_VALID_ERR		FW validation has failed.
+ *
+ * COMMS_STS_TIMEOUT_ERR	Command execution has timed out.
+ */
+enum comms_sts {
+	COMMS_STS_NOOP = 0,
+	COMMS_STS_ACK = 1,
+	COMMS_STS_OK = 2,
+	COMMS_STS_ERR = 3,
+	COMMS_STS_VALID_ERR = 4,
+	COMMS_STS_TIMEOUT_ERR = 5,
+	COMMS_STS_INVLD_LAST
+};
+
+/* RAM types for FW components loading - defines the base address */
+enum comms_ram_types {
+	COMMS_SRAM = 0,
+	COMMS_DRAM = 1,
+};
+
+#define COMMS_STATUS_OFFSET_SHIFT	0
+#define COMMS_STATUS_OFFSET_MASK	0x03FFFFFF
+#define COMMS_STATUS_OFFSET_ALIGN_SHIFT	2
+#define COMMS_STATUS_RAM_TYPE_SHIFT	26
+#define COMMS_STATUS_RAM_TYPE_MASK	0x0C000000
+#define COMMS_STATUS_STATUS_SHIFT	28
+#define COMMS_STATUS_STATUS_MASK	0xF0000000
+
+/*
+ * FW status to LKD register structure
+ * @offset - an offset from the base of the ram_type, shifted right by
+ *           2 bits (always aligned to 32 bits).
+ *           Allows a maximum addressable offset of 256MB from the RAM base.
+ *           Example: for a real offset in RAM of 0x800000 (8MB), the value
+ *           in the offset field is (0x800000 >> 2) = 0x200000.
+ * @ram_type - the RAM type that should be used for the offset, from
+ *             enum comms_ram_types
+ * @status - status from enum comms_sts
+ */
+struct comms_status {
+	union {		/* bit fields are only for FW use */
+		struct {
+			u32 offset :26;
+			enum comms_ram_types ram_type :2;
+			enum comms_sts status :4;	/* 16 statuses */
+		};
+		__le32 val;
+	};
+};
+
+#define NAME_MAX_LEN	32 /* bytes */
+struct hl_module_data {
+	__u8 name[NAME_MAX_LEN];
+	__u8 version[VERSION_MAX_LEN];
+};
+
+/**
+ * struct hl_component_versions - versions associated with hl component.
+ * @struct_size: size of the entire struct (including the dynamic size of the
+ *               modules).
+ * @modules_offset: offset of the modules field in this struct.
+ * @component: version of the component itself.
+ * @fw_os: Firmware OS Version.
+ * @comp_name: Name of the component.
+ * @modules_counter: number of set bits in modules_mask.
+ * @reserved: reserved for future use.
+ * @modules: versions of the component's modules. Elaborated explanation in
+ *           struct cpucp_versions.
+ */
+struct hl_component_versions {
+	__le16 struct_size;
+	__le16 modules_offset;
+	__u8 component[VERSION_MAX_LEN];
+	__u8 fw_os[VERSION_MAX_LEN];
+	__u8 comp_name[NAME_MAX_LEN];
+	__u8 modules_counter;
+	__u8 reserved[3];
+	struct hl_module_data modules[];
+};
+
+/* Maximum size of the FIT */
+#define HL_FW_VERSIONS_FIT_SIZE 4096
+
+#endif /* HL_BOOT_IF_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 964ca1f15e3f..7c26db874ff0 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -679,6 +679,7 @@ struct hid_device { /* device report descriptor */
 	struct list_head debug_list;
 	spinlock_t debug_list_lock;
 	wait_queue_head_t debug_wait;
+	struct kref ref;
 	unsigned int id; /* system unique id */
@@ -687,6 +688,8 @@ struct hid_device { /* device report descriptor */
 #endif /* CONFIG_BPF */
 };
+void hiddev_free(struct kref *ref);
+
 #define to_hid_device(pdev) \
 	container_of(pdev, struct hid_device, dev)
@@ -833,11 +836,11 @@ struct hid_driver {
 	void (*feature_mapping)(struct hid_device *hdev,
 			struct hid_field *field,
 			struct hid_usage *usage);
-#ifdef CONFIG_PM
+
 	int (*suspend)(struct hid_device *hdev, pm_message_t message);
 	int (*resume)(struct hid_device *hdev);
 	int (*reset_resume)(struct hid_device *hdev);
-#endif
+
 /* private: */
 	struct device_driver driver;
 };
@@ -909,7 +912,7 @@ extern bool hid_ignore(struct hid_device *);
 extern int hid_add_device(struct hid_device *);
 extern void hid_destroy_device(struct hid_device *);
-extern struct bus_type hid_bus_type;
+extern const struct bus_type hid_bus_type;
 extern int __must_check __hid_register_driver(struct hid_driver *,
 		struct module *, const char *mod_name);
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index e9afb61e6ee0..7118ac28d468 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -77,17 +77,6 @@ enum hid_bpf_attach_flags {
 int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
 int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
-/* Following functions are kfunc that we export to BPF programs */
-/* available everywhere in HID-BPF */
-__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t __sz);
-
-/* only available in syscall */
-int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags);
-int hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
-		       enum hid_report_type rtype, enum hid_class_request reqtype);
-struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id);
-void hid_bpf_release_context(struct hid_bpf_ctx
*ctx); - /* * Below is HID internal */ @@ -115,7 +104,7 @@ struct hid_bpf_ops { size_t len, enum hid_report_type rtype, enum hid_class_request reqtype); struct module *owner; - struct bus_type *bus_type; + const struct bus_type *bus_type; }; extern struct hid_bpf_ops *hid_bpf_ops; diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 99c474de800d..451c1dff0e87 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -454,7 +454,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio, memcpy(to, from, chunk); kunmap_local(from); - from += chunk; + to += chunk; offset += chunk; len -= chunk; } while (len > 0); @@ -484,6 +484,82 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset, } /** + * folio_zero_tail - Zero the tail of a folio. + * @folio: The folio to zero. + * @offset: The byte offset in the folio to start zeroing at. + * @kaddr: The address the folio is currently mapped to. + * + * If you have already used kmap_local_folio() to map a folio, written + * some data to it and now need to zero the end of the folio (and flush + * the dcache), you can use this function. If you do not have the + * folio kmapped (eg the folio has been partially populated by DMA), + * use folio_zero_range() or folio_zero_segment() instead. + * + * Return: An address which can be passed to kunmap_local(). + */ +static inline __must_check void *folio_zero_tail(struct folio *folio, + size_t offset, void *kaddr) +{ + size_t len = folio_size(folio) - offset; + + if (folio_test_highmem(folio)) { + size_t max = PAGE_SIZE - offset_in_page(offset); + + while (len > max) { + memset(kaddr, 0, max); + kunmap_local(kaddr); + len -= max; + offset += max; + max = PAGE_SIZE; + kaddr = kmap_local_folio(folio, offset); + } + } + + memset(kaddr, 0, len); + flush_dcache_folio(folio); + + return kaddr; +} + +/** + * folio_fill_tail - Copy some data to a folio and pad with zeroes. + * @folio: The destination folio. + * @offset: The offset into @folio at which to start copying. + * @from: The data to copy. + * @len: How many bytes of data to copy. + * + * This function is most useful for filesystems which support inline data. + * When they want to copy data from the inode into the page cache, this + * function does everything for them. It supports large folios even on + * HIGHMEM configurations. + */ +static inline void folio_fill_tail(struct folio *folio, size_t offset, + const char *from, size_t len) +{ + char *to = kmap_local_folio(folio, offset); + + VM_BUG_ON(offset + len > folio_size(folio)); + + if (folio_test_highmem(folio)) { + size_t max = PAGE_SIZE - offset_in_page(offset); + + while (len > max) { + memcpy(to, from, max); + kunmap_local(to); + len -= max; + from += max; + offset += max; + max = PAGE_SIZE; + to = kmap_local_folio(folio, offset); + } + } + + memcpy(to, from, len); + to = folio_zero_tail(folio, offset + len, to + len); + kunmap_local(to); +} + +/** * memcpy_from_file_folio - Copy some bytes from a file folio. * @to: The destination buffer. * @folio: The folio to copy from. @@ -551,10 +627,24 @@ static inline void folio_zero_range(struct folio *folio, zero_user_segments(&folio->page, start, start + length, 0, 0); } -static inline void unmap_and_put_page(struct page *page, void *addr) +/** + * folio_release_kmap - Unmap a folio and drop a refcount. + * @folio: The folio to release. + * @addr: The address previously returned by a call to kmap_local_folio(). + * + * It is common, eg in directory handling to kmap a folio. 
This function + * unmaps the folio and drops the refcount that was being held to keep the + * folio alive while we accessed it. + */ +static inline void folio_release_kmap(struct folio *folio, void *addr) { kunmap_local(addr); - put_page(page); + folio_put(folio); +} + +static inline void unmap_and_put_page(struct page *page, void *addr) +{ + folio_release_kmap(page_folio(page), addr); } #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 39fbfb4be944..5f4c74facf6a 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -108,17 +108,13 @@ enum qm_stop_reason { }; enum qm_state { - QM_INIT = 0, - QM_START, - QM_CLOSE, + QM_WORK = 0, QM_STOP, }; enum qp_state { - QP_INIT = 1, - QP_START, + QP_START = 1, QP_STOP, - QP_CLOSE, }; enum qm_hw_ver { @@ -144,6 +140,13 @@ enum qm_vf_state { QM_NOT_READY, }; +enum qm_misc_ctl_bits { + QM_DRIVER_REMOVING = 0x0, + QM_RST_SCHED, + QM_RESETTING, + QM_MODULE_PARAM, +}; + enum qm_cap_bits { QM_SUPPORT_DB_ISOLATION = 0x0, QM_SUPPORT_FUNC_QOS, @@ -153,6 +156,11 @@ enum qm_cap_bits { QM_SUPPORT_RPM, }; +struct qm_dev_alg { + u64 alg_msk; + const char *alg; +}; + struct dfx_diff_registers { u32 *regs; u32 reg_offset; @@ -258,6 +266,16 @@ struct hisi_qm_cap_info { u32 v3_val; }; +struct hisi_qm_cap_record { + u32 type; + u32 cap_val; +}; + +struct hisi_qm_cap_tables { + struct hisi_qm_cap_record *qm_cap_table; + struct hisi_qm_cap_record *dev_cap_table; +}; + struct hisi_qm_list { struct mutex lock; struct list_head list; @@ -269,6 +287,7 @@ struct hisi_qm_poll_data { struct hisi_qm *qm; struct work_struct work; u16 *qp_finish_id; + u16 eqe_num; }; /** @@ -285,6 +304,18 @@ struct qm_err_isolate { struct list_head qm_hw_errs; }; +struct qm_rsv_buf { + struct qm_sqc *sqc; + struct qm_cqc *cqc; + struct qm_eqc *eqc; + struct qm_aeqc *aeqc; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + dma_addr_t eqc_dma; + dma_addr_t aeqc_dma; + struct qm_dma qcdma; +}; + struct hisi_qm { enum qm_hw_ver ver; enum qm_fun_type fun_type; @@ -317,6 +348,7 @@ struct hisi_qm { dma_addr_t cqc_dma; dma_addr_t eqe_dma; dma_addr_t aeqe_dma; + struct qm_rsv_buf xqc_buf; struct hisi_qm_status status; const struct hisi_qm_err_ini *err_ini; @@ -344,7 +376,6 @@ struct hisi_qm { struct work_struct rst_work; struct work_struct cmd_process; - const char *algs; bool use_sva; resource_size_t phys_base; @@ -355,6 +386,8 @@ struct hisi_qm { u32 mb_qos; u32 type_rate; struct qm_err_isolate isolate_data; + + struct hisi_qm_cap_tables cap_tables; }; struct hisi_qp_status { @@ -471,6 +504,20 @@ static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list) mutex_init(&qm_list->lock); } +static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +{ + mutex_lock(&qm_list->lock); + list_add_tail(&qm->list, &qm_list->list); + mutex_unlock(&qm_list->lock); +} + +static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +{ + mutex_lock(&qm_list->lock); + list_del(&qm->list); + mutex_unlock(&qm_list->lock); +} + int hisi_qm_init(struct hisi_qm *qm); void hisi_qm_uninit(struct hisi_qm *qm); int hisi_qm_start(struct hisi_qm *qm); @@ -516,8 +563,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num); void hisi_qm_dev_shutdown(struct pci_dev *pdev); void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -void 
hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard); +void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard); int hisi_qm_resume(struct device *dev); int hisi_qm_suspend(struct device *dev); void hisi_qm_pm_uninit(struct hisi_qm *qm); @@ -528,6 +575,8 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); u32 hisi_qm_get_hw_info(struct hisi_qm *qm, const struct hisi_qm_cap_info *info_table, u32 index, bool is_read); +int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, + u32 dev_algs_size); /* Used by VFIO ACC live migration driver */ struct pci_driver *hisi_sec_get_pf_driver(void); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 0ee140176f10..641c4567cfa7 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -13,13 +13,13 @@ #define _LINUX_HRTIMER_H #include <linux/hrtimer_defs.h> -#include <linux/rbtree.h> +#include <linux/hrtimer_types.h> #include <linux/init.h> #include <linux/list.h> -#include <linux/percpu.h> +#include <linux/percpu-defs.h> +#include <linux/rbtree.h> #include <linux/seqlock.h> #include <linux/timer.h> -#include <linux/timerqueue.h> struct hrtimer_clock_base; struct hrtimer_cpu_base; @@ -60,14 +60,6 @@ enum hrtimer_mode { }; /* - * Return values for the callback function - */ -enum hrtimer_restart { - HRTIMER_NORESTART, /* Timer is not restarted */ - HRTIMER_RESTART, /* Timer must be restarted */ -}; - -/* * Values to track state of the timer * * Possible states: @@ -95,38 +87,6 @@ enum hrtimer_restart { #define HRTIMER_STATE_ENQUEUED 0x01 /** - * struct hrtimer - the basic hrtimer structure - * @node: timerqueue node, which also manages node.expires, - * the absolute expiry time in the hrtimers internal - * representation. The time is related to the clock on - * which the timer is based. Is setup by adding - * slack to the _softexpires value. For non range timers - * identical to _softexpires. - * @_softexpires: the absolute earliest expiry time of the hrtimer. - * The time which was given as expiry time when the timer - * was armed. - * @function: timer expiry callback function - * @base: pointer to the timer base (per cpu and per clock) - * @state: state information (See bit values above) - * @is_rel: Set if the timer was armed relative - * @is_soft: Set if hrtimer will be expired in soft interrupt context. - * @is_hard: Set if hrtimer will be expired in hard interrupt context - * even on RT. - * - * The hrtimer structure must be initialized by hrtimer_init() - */ -struct hrtimer { - struct timerqueue_node node; - ktime_t _softexpires; - enum hrtimer_restart (*function)(struct hrtimer *); - struct hrtimer_clock_base *base; - u8 state; - u8 is_rel; - u8 is_soft; - u8 is_hard; -}; - -/** * struct hrtimer_sleeper - simple sleeper structure * @timer: embedded timer structure * @task: task to wake up @@ -197,6 +157,7 @@ enum hrtimer_base_type { * @max_hang_time: Maximum time spent in hrtimer_interrupt * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are * expired + * @online: CPU is online from an hrtimers point of view * @timer_waiters: A hrtimer_cancel() invocation waits for the timer * callback to finish. 
* @expires_next: absolute time of the next event, is required for remote @@ -219,7 +180,8 @@ struct hrtimer_cpu_base { unsigned int hres_active : 1, in_hrtirq : 1, hang_detected : 1, - softirq_activated : 1; + softirq_activated : 1, + online : 1; #ifdef CONFIG_HIGH_RES_TIMERS unsigned int nr_events; unsigned short nr_retries; @@ -531,9 +493,9 @@ extern void sysrq_timer_list_show(void); int hrtimers_prepare_cpu(unsigned int cpu); #ifdef CONFIG_HOTPLUG_CPU -int hrtimers_dead_cpu(unsigned int cpu); +int hrtimers_cpu_dying(unsigned int cpu); #else -#define hrtimers_dead_cpu NULL +#define hrtimers_cpu_dying NULL #endif #endif diff --git a/include/linux/hrtimer_types.h b/include/linux/hrtimer_types.h new file mode 100644 index 000000000000..ad66a3081735 --- /dev/null +++ b/include/linux/hrtimer_types.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HRTIMER_TYPES_H +#define _LINUX_HRTIMER_TYPES_H + +#include <linux/types.h> +#include <linux/timerqueue_types.h> + +struct hrtimer_clock_base; + +/* + * Return values for the callback function + */ +enum hrtimer_restart { + HRTIMER_NORESTART, /* Timer is not restarted */ + HRTIMER_RESTART, /* Timer must be restarted */ +}; + +/** + * struct hrtimer - the basic hrtimer structure + * @node: timerqueue node, which also manages node.expires, + * the absolute expiry time in the hrtimers internal + * representation. The time is related to the clock on + * which the timer is based. Is setup by adding + * slack to the _softexpires value. For non range timers + * identical to _softexpires. + * @_softexpires: the absolute earliest expiry time of the hrtimer. + * The time which was given as expiry time when the timer + * was armed. + * @function: timer expiry callback function + * @base: pointer to the timer base (per cpu and per clock) + * @state: state information (See bit values above) + * @is_rel: Set if the timer was armed relative + * @is_soft: Set if hrtimer will be expired in soft interrupt context. + * @is_hard: Set if hrtimer will be expired in hard interrupt context + * even on RT. + * + * The hrtimer structure must be initialized by hrtimer_init() + */ +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; +}; + +#endif /* _LINUX_HRTIMER_TYPES_H */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index fa0350b0812a..5adb86af35fc 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -67,6 +67,26 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) +/* + * Mask of all large folio orders supported for anonymous THP; all orders up to + * and including PMD_ORDER, except order-0 (which is not "huge") and order-1 + * (which is a limitation of the THP implementation). + */ +#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1))) + +/* + * Mask of all large folio orders supported for file THP. + */ +#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER)) + +/* + * Mask of all large folio orders supported for THP. 
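+ *
+ * Worked example (an editorial illustration assuming PMD_ORDER == 9,
+ * as on x86-64 with 4KiB pages; not wording from the header itself):
+ * orders are encoded as set bits, so THP_ORDERS_ALL_ANON is
+ * (BIT(10) - 1) & ~(BIT(0) | BIT(1)) == 0x3fc, i.e. the set of orders
+ * 2 through 9.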
+ */ +#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE) + +#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \ + (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order))) + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) @@ -77,45 +97,105 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) extern unsigned long transparent_hugepage_flags; +extern unsigned long huge_anon_orders_always; +extern unsigned long huge_anon_orders_madvise; +extern unsigned long huge_anon_orders_inherit; -#define hugepage_flags_enabled() \ - (transparent_hugepage_flags & \ - ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \ - (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))) -#define hugepage_flags_always() \ - (transparent_hugepage_flags & \ - (1<<TRANSPARENT_HUGEPAGE_FLAG)) +static inline bool hugepage_global_enabled(void) +{ + return transparent_hugepage_flags & + ((1<<TRANSPARENT_HUGEPAGE_FLAG) | + (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)); +} + +static inline bool hugepage_global_always(void) +{ + return transparent_hugepage_flags & + (1<<TRANSPARENT_HUGEPAGE_FLAG); +} + +static inline bool hugepage_flags_enabled(void) +{ + /* + * We cover both the anon and the file-backed case here; we must return + * true if globally enabled, even when all anon sizes are set to never. + * So we don't need to look at huge_anon_orders_inherit. + */ + return hugepage_global_enabled() || + huge_anon_orders_always || + huge_anon_orders_madvise; +} + +static inline int highest_order(unsigned long orders) +{ + return fls_long(orders) - 1; +} + +static inline int next_order(unsigned long *orders, int prev) +{ + *orders &= ~BIT(prev); + return highest_order(*orders); +} /* * Do the below checks: * - For file vma, check if the linear page offset of vma is - * HPAGE_PMD_NR aligned within the file. The hugepage is - * guaranteed to be hugepage-aligned within the file, but we must - * check that the PMD-aligned addresses in the VMA map to - * PMD-aligned offsets within the file, else the hugepage will - * not be PMD-mappable. - * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE + * order-aligned within the file. The hugepage is + * guaranteed to be order-aligned within the file, but we must + * check that the order-aligned addresses in the VMA map to + * order-aligned offsets within the file, else the hugepage will + * not be mappable. + * - For all vmas, check if the haddr is in an aligned hugepage * area. */ -static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long addr) +static inline bool thp_vma_suitable_order(struct vm_area_struct *vma, + unsigned long addr, int order) { + unsigned long hpage_size = PAGE_SIZE << order; unsigned long haddr; /* Don't have to check pgoff for anonymous vma */ if (!vma_is_anonymous(vma)) { if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, - HPAGE_PMD_NR)) + hpage_size >> PAGE_SHIFT)) return false; } - haddr = addr & HPAGE_PMD_MASK; + haddr = ALIGN_DOWN(addr, hpage_size); - if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) + if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end) return false; return true; } +/* + * Filter the bitfield of input orders to the ones suitable for use in the vma. + * See thp_vma_suitable_order(). + * All orders that pass the checks are returned as a bitfield. 
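+ *
+ * A hypothetical call sketch (illustrative values, not from the patch):
+ * with orders == BIT(9) | BIT(3), where order-9 alignment does not fit
+ * in the vma but order-3 does, the loop below clears BIT(9) and stops
+ * at order 3, returning BIT(3).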
+ */ +static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, + unsigned long addr, unsigned long orders) +{ + int order; + + /* + * Iterate over orders, highest to lowest, removing orders that don't + * meet alignment requirements from the set. Exit loop at first order + * that meets requirements, since all lower orders must also meet + * requirements. + */ + + order = highest_order(orders); + + while (orders) { + if (thp_vma_suitable_order(vma, addr, order)) + break; + order = next_order(&orders, order); + } + + return orders; +} + static inline bool file_thp_enabled(struct vm_area_struct *vma) { struct inode *inode; @@ -126,12 +206,55 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma) inode = vma->vm_file->f_inode; return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) && - (vma->vm_flags & VM_EXEC) && !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); } -bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, - bool smaps, bool in_pf, bool enforce_sysfs); +unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders); + +/** + * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma + * @vma: the vm area to check + * @vm_flags: use these vm_flags instead of vma->vm_flags + * @smaps: whether answer will be used for smaps file + * @in_pf: whether answer will be used by page fault handler + * @enforce_sysfs: whether sysfs config should be taken into account + * @orders: bitfield of all orders to consider + * + * Calculates the intersection of the requested hugepage orders and the allowed + * hugepage orders for the provided vma. Permitted orders are encoded as a set + * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3 + * corresponds to order-3, etc). Order-0 is never considered a hugepage order. + * + * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage + * orders are allowed. + */ +static inline +unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders) +{ + /* Optimization to check if required orders are enabled early. 
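+	 * (An illustrative reading, not wording from the patch: for an
+	 * anonymous vma the sysfs mask is the union of the "always"
+	 * orders, the "madvise" orders when VM_HUGEPAGE is set, and the
+	 * "inherit" orders when the global policy applies; if @orders
+	 * shares no bit with that mask we can return 0 without calling
+	 * __thp_vma_allowable_orders().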
*/ + if (enforce_sysfs && vma_is_anonymous(vma)) { + unsigned long mask = READ_ONCE(huge_anon_orders_always); + + if (vm_flags & VM_HUGEPAGE) + mask |= READ_ONCE(huge_anon_orders_madvise); + if (hugepage_global_always() || + ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled())) + mask |= READ_ONCE(huge_anon_orders_inherit); + + orders &= mask; + if (!orders) + return 0; + } + + return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, + enforce_sysfs, orders); +} #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -267,17 +390,24 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) return false; } -static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long addr) +static inline bool thp_vma_suitable_order(struct vm_area_struct *vma, + unsigned long addr, int order) { return false; } -static inline bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags, bool smaps, - bool in_pf, bool enforce_sysfs) +static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, + unsigned long addr, unsigned long orders) { - return false; + return 0; +} + +static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders) +{ + return 0; } static inline void folio_prep_large_rmappable(struct folio *folio) {} diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 5b2626063f4f..c1ee640d87b1 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -30,7 +30,7 @@ void free_huge_folio(struct folio *folio); #ifdef CONFIG_HUGETLB_PAGE -#include <linux/mempolicy.h> +#include <linux/pagemap.h> #include <linux/shm.h> #include <asm/tlbflush.h> @@ -60,6 +60,7 @@ struct resv_map { long adds_in_progress; struct list_head region_cache; long region_cache_count; + struct rw_semaphore rw_sema; #ifdef CONFIG_CGROUP_HUGETLB /* * On private mappings, the counter to uncharge reservations is stored @@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *, zap_flags_t); -void __unmap_hugepage_range_final(struct mmu_gather *tlb, +void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags); @@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end); +extern void __hugetlb_zap_begin(struct vm_area_struct *vma, + unsigned long *begin, unsigned long *end); +extern void __hugetlb_zap_end(struct vm_area_struct *vma, + struct zap_details *details); + +static inline void hugetlb_zap_begin(struct vm_area_struct *vma, + unsigned long *start, unsigned long *end) +{ + if (is_vm_hugetlb_page(vma)) + __hugetlb_zap_begin(vma, start, end); +} + +static inline void hugetlb_zap_end(struct vm_area_struct *vma, + struct zap_details *details) +{ + if (is_vm_hugetlb_page(vma)) + __hugetlb_zap_end(vma, details); +} + void hugetlb_vma_lock_read(struct vm_area_struct *vma); void hugetlb_vma_unlock_read(struct vm_area_struct *vma); void hugetlb_vma_lock_write(struct vm_area_struct *vma); @@ -260,6 +280,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long cp_flags); bool is_hugetlb_entry_migration(pte_t pte); +bool 
is_hugetlb_entry_hwpoisoned(pte_t pte); void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); #else /* !CONFIG_HUGETLB_PAGE */ @@ -296,6 +317,18 @@ static inline void adjust_range_if_pmd_sharing_possible( { } +static inline void hugetlb_zap_begin( + struct vm_area_struct *vma, + unsigned long *start, unsigned long *end) +{ +} + +static inline void hugetlb_zap_end( + struct vm_area_struct *vma, + struct zap_details *details) +{ +} + static inline struct page *hugetlb_follow_page_mask( struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask) @@ -441,7 +474,7 @@ static inline long hugetlb_change_protection( return 0; } -static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb, +static inline void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags) @@ -512,7 +545,6 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) } struct hugetlbfs_inode_info { - struct shared_policy policy; struct inode vfs_inode; unsigned int seals; }; @@ -716,8 +748,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask); -struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, - unsigned long address); int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, pgoff_t idx); void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, @@ -799,7 +829,7 @@ static inline unsigned huge_page_shift(struct hstate *h) static inline bool hstate_is_gigantic(struct hstate *h) { - return huge_page_order(h) > MAX_ORDER; + return huge_page_order(h) > MAX_PAGE_ORDER; } static inline unsigned int pages_per_huge_page(const struct hstate *h) @@ -812,6 +842,12 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h) return huge_page_size(h) / 512; } +static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, + struct address_space *mapping, pgoff_t idx) +{ + return filemap_lock_folio(mapping, idx << huge_page_order(h)); +} + #include <asm/hugetlb.h> #ifndef is_hugepage_only_range @@ -984,7 +1020,9 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); + unsigned long psize = huge_page_size(hstate_vma(vma)); + + set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); } #endif @@ -1006,6 +1044,12 @@ static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio return NULL; } +static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, + struct address_space *mapping, pgoff_t idx) +{ + return NULL; +} + static inline int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) { @@ -1026,13 +1070,6 @@ alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, return NULL; } -static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h, - struct vm_area_struct *vma, - unsigned long address) -{ - return NULL; -} - static inline int __alloc_bootmem_huge_page(struct hstate *h) { return 0; @@ -1173,7 +1210,7 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, } static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) + pte_t *ptep, pte_t pte, 
unsigned long sz) { } @@ -1231,6 +1268,8 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma) return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data; } +bool __vma_private_lock(struct vm_area_struct *vma); + /* * Safe version of huge_pte_offset() to check the locks. See comments * above huge_pte_offset(). diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 3d82d91f49ac..e5d64b8b59c2 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -22,13 +22,6 @@ struct resv_map; struct file_region; #ifdef CONFIG_CGROUP_HUGETLB -/* - * Minimum page order trackable by hugetlb cgroup. - * At least 3 pages are necessary for all the tracking information. - * The second tail page contains all of the hugetlb-specific fields. - */ -#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__NR_USED_SUBPAGE) - enum hugetlb_memory_event { HUGETLB_MAX, HUGETLB_NR_MEMORY_EVENTS, @@ -68,8 +61,6 @@ static inline struct hugetlb_cgroup * __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd) { VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); - if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER) - return NULL; if (rsvd) return folio->_hugetlb_cgroup_rsvd; else @@ -91,8 +82,6 @@ static inline void __set_hugetlb_cgroup(struct folio *folio, struct hugetlb_cgroup *h_cg, bool rsvd) { VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); - if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER) - return; if (rsvd) folio->_hugetlb_cgroup_rsvd = h_cg; else diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 8a3115516a1b..136e9842120e 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs); +extern long hwrng_yield(struct hwrng *rng); #endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 3430cc2b05a6..652ecb7abeda 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -23,7 +23,7 @@ #include <linux/swab.h> /* for swab16 */ #include <uapi/linux/i2c.h> -extern struct bus_type i2c_bus_type; +extern const struct bus_type i2c_bus_type; extern struct device_type i2c_adapter_type; extern struct device_type i2c_client_type; @@ -237,7 +237,6 @@ enum i2c_driver_flags { * struct i2c_driver - represent an I2C device driver * @class: What kind of i2c device we instantiate (for detect) * @probe: Callback for device binding - * @probe_new: Transitional callback for device binding - do not use * @remove: Callback for device unbinding * @shutdown: Callback for device shutdown * @alert: Alert callback, for example for the SMBus alert protocol @@ -272,16 +271,8 @@ enum i2c_driver_flags { struct i2c_driver { unsigned int class; - union { /* Standard driver model interfaces */ - int (*probe)(struct i2c_client *client); - /* - * Legacy callback that was part of a conversion of .probe(). - * Today it has the same semantic as .probe(). Don't use for new - * code. 
- */
-		int (*probe_new)(struct i2c_client *client);
-	};
+	int (*probe)(struct i2c_client *client);
 	void (*remove)(struct i2c_client *client);
@@ -755,6 +746,8 @@ struct i2c_adapter {
 	struct irq_domain *host_notify_domain;
 	struct regulator *bus_regulator;
+
+	struct dentry *debugfs;
 };
 #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -859,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
 /* i2c adapter classes (bitmask) */
 #define I2C_CLASS_HWMON		(1<<0)	/* lm_sensors, ... */
-#define I2C_CLASS_DDC		(1<<3)	/* DDC bus on graphics adapters */
 #define I2C_CLASS_SPD		(1<<7)	/* Memory modules */
 /* Warn users that the adapter doesn't support classes anymore */
 #define I2C_CLASS_DEPRECATED	(1<<8)
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 90fa83464f00..e119f11948ef 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -54,6 +54,7 @@ enum i3c_hdr_mode {
 * struct i3c_priv_xfer - I3C SDR private transfer
 * @rnw: encodes the transfer direction. true for a read, false for a write
 * @len: transfer length in bytes of the transfer
+ * @actual_len: actual number of bytes transferred by the controller
 * @data: input/output buffer
 * @data.in: input buffer. Must point to a DMA-able buffer
 * @data.out: output buffer. Must point to a DMA-able buffer
@@ -62,6 +63,7 @@ enum i3c_hdr_mode {
 struct i3c_priv_xfer {
 	u8 rnw;
 	u16 len;
+	u16 actual_len;
 	union {
 		void *in;
 		const void *out;
@@ -96,7 +98,7 @@ enum i3c_dcr {
 /**
 * struct i3c_device_info - I3C device information
- * @pid: Provisional ID
+ * @pid: Provisioned ID
 * @bcr: Bus Characteristic Register
 * @dcr: Device Characteristic Register
 * @static_addr: static/I2C address
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 0b52da4f2346..0ca27dd86956 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -24,6 +24,12 @@
 struct i2c_client;
+/* Notifier actions. The notifier call data is the struct i3c_bus. */
+enum {
+	I3C_NOTIFY_BUS_ADD,
+	I3C_NOTIFY_BUS_REMOVE,
+};
+
 struct i3c_master_controller;
 struct i3c_bus;
 struct i3c_device;
@@ -70,7 +76,6 @@ struct i2c_dev_boardinfo {
 /**
 * struct i2c_dev_desc - I2C device descriptor
 * @common: common part of the I2C device descriptor
- * @boardinfo: pointer to the boardinfo attached to this I2C device
 * @dev: I2C device object registered to the I2C framework
 * @addr: I2C device address
 * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
@@ -129,6 +134,7 @@ struct i3c_ibi_slot {
 *	     rejected by the master
 * @num_slots: number of IBI slots reserved for this device
 * @enabled: reflects the IBI status
+ * @wq: workqueue used to execute IBI handlers.
 * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
 *	     handler will be called from the controller workqueue, and as such
 *	     is allowed to sleep (though it is recommended to process the IBI
@@ -151,6 +157,7 @@ struct i3c_device_ibi_info {
 	unsigned int max_payload_len;
 	unsigned int num_slots;
 	unsigned int enabled;
+	struct workqueue_struct *wq;
 	void (*handler)(struct i3c_device *dev,
 			const struct i3c_ibi_payload *payload);
 };
@@ -166,7 +173,7 @@ struct i3c_device_ibi_info {
 *	  assigned a dynamic address by the master. Will be used during
 *	  bus initialization to assign it a specific dynamic address
 *	  before starting DAA (Dynamic Address Assignment)
- * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * @pid: I3C Provisioned ID exposed by the device.
This is a unique identifier
 *	  that may be used to attach boardinfo to i3c_dev_desc when the device
 *	  does not have a static address
 * @of_node: optional DT node in case the device has been described in the DT
@@ -426,6 +433,8 @@ struct i3c_bus {
 *		   for a future IBI
 *		   This method is mandatory only if ->request_ibi is not
 *		   NULL.
+ * @enable_hotjoin: enable hot join event detection.
+ * @disable_hotjoin: disable hot join event detection.
 */
 struct i3c_master_controller_ops {
 	int (*bus_init)(struct i3c_master_controller *master);
@@ -452,6 +461,8 @@ struct i3c_master_controller_ops {
 	int (*disable_ibi)(struct i3c_dev_desc *dev);
 	void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
 				 struct i3c_ibi_slot *slot);
+	int (*enable_hotjoin)(struct i3c_master_controller *master);
+	int (*disable_hotjoin)(struct i3c_master_controller *master);
 };
 /**
@@ -465,11 +476,12 @@ struct i3c_master_controller_ops {
 * @ops: master operations. See &struct i3c_master_controller_ops
 * @secondary: true if the master is a secondary master
 * @init_done: true when the bus initialization is done
+ * @hotjoin: true if the master supports hotjoin
 * @boardinfo.i3c: list of I3C boardinfo objects
 * @boardinfo.i2c: list of I2C boardinfo objects
 * @boardinfo: board-level information attached to devices connected on the bus
 * @bus: I3C bus exposed by this master
- * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * @wq: workqueue which can be used by master
 *	drivers if they need to postpone operations that need to take place
 *	in a thread context. Typical examples are Hot Join processing which
 *	requires taking the bus lock in maintenance, which in turn, can only
@@ -487,6 +499,7 @@ struct i3c_master_controller {
 	const struct i3c_master_controller_ops *ops;
 	unsigned int secondary : 1;
 	unsigned int init_done : 1;
+	unsigned int hotjoin: 1;
 	struct {
 		struct list_head i3c;
 		struct list_head i2c;
@@ -543,6 +556,8 @@ int i3c_master_register(struct i3c_master_controller *master,
 			const struct i3c_master_controller_ops *ops,
 			bool secondary);
 void i3c_master_unregister(struct i3c_master_controller *master);
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master);
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master);
 /**
 * i3c_dev_get_master_data() - get master private data attached to an I3C
@@ -652,4 +667,9 @@ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
 struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
+			     void *data);
+int i3c_register_notifier(struct notifier_block *nb);
+int i3c_unregister_notifier(struct notifier_block *nb);
+
 #endif /* I3C_MASTER_H */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index a0dce14090a9..da5f5fa4a3a6 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
 */
 #define idr_for_each_entry_ul(idr, entry, tmp, id)			\
	for (tmp = 0, id = 0;						\
-	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
	     tmp = id, ++id)
 /**
@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
+ * After normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
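+ *
+ * An illustrative use of that property (a sketch; "foo_idr",
+ * "struct foo", "key", "wanted" and "start_id" are hypothetical):
+ *
+ *	struct foo *entry;
+ *	unsigned long tmp, id = start_id;
+ *
+ *	idr_for_each_entry_continue_ul(&foo_idr, entry, tmp, id)
+ *		if (entry->key == wanted)
+ *			break;
+ *	if (!entry)
+ *		return -ENOENT;
+ *
+ * A loop that terminates normally leaves @entry NULL, so no separate
+ * "found" flag is needed.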
diff --git a/include/linux/idr.h b/include/linux/idr.h index a0dce14090a9..da5f5fa4a3a6 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -200,7 +200,7 @@ static inline void idr_preload_end(void) */ #define idr_for_each_entry_ul(idr, entry, tmp, id) \ for (tmp = 0, id = 0; \ - tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ + ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \ tmp = id, ++id) /** @@ -224,10 +224,12 @@ static inline void idr_preload_end(void) * @id: Entry ID. * * Continue to iterate over entries, continuing after the current position. + * After normal termination @entry is left with the value NULL. This + * is convenient for a "not found" value. */ #define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \ for (tmp = id; \ - tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ + ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \ tmp = id, ++id) /*
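Because the reworked macros leave @entry NULL after a completed walk, a lookup loop no longer needs a separate "found" flag. A short sketch (the idr and field names are illustrative):

	struct foo *entry;
	unsigned long id, tmp;

	idr_for_each_entry_ul(&foo_idr, entry, tmp, id) {
		if (entry->key == wanted)
			break;
	}
	if (!entry)
		return -ENOENT;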
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index bd2f6e19c357..83c4d060a559 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -172,11 +172,11 @@ #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) -/* PV1 Layout 11ah 9.8.3.1 */ +/* PV1 Layout IEEE 802.11-2020 9.8.3.1 */ #define IEEE80211_PV1_FCTL_VERS 0x0003 #define IEEE80211_PV1_FCTL_FTYPE 0x001c #define IEEE80211_PV1_FCTL_STYPE 0x00e0 -#define IEEE80211_PV1_FCTL_TODS 0x0100 +#define IEEE80211_PV1_FCTL_FROMDS 0x0100 #define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200 #define IEEE80211_PV1_FCTL_PM 0x0400 #define IEEE80211_PV1_FCTL_MOREDATA 0x0800 @@ -307,6 +307,13 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_TRIGGER_TYPE_BQRP 0x6 #define IEEE80211_TRIGGER_TYPE_NFRP 0x7 +/* UL-bandwidth within common_info of trigger frame */ +#define IEEE80211_TRIGGER_ULBW_MASK 0xc0000 +#define IEEE80211_TRIGGER_ULBW_20MHZ 0x0 +#define IEEE80211_TRIGGER_ULBW_40MHZ 0x1 +#define IEEE80211_TRIGGER_ULBW_80MHZ 0x2 +#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ 0x3 + struct ieee80211_hdr { __le16 frame_control; __le16 duration_id; @@ -951,17 +958,24 @@ struct ieee80211_wide_bw_chansw_ie { * @dtim_count: DTIM Count * @dtim_period: DTIM Period * @bitmap_ctrl: Bitmap Control + * @required_octet: "Syntactic sugar" to force the struct size to the + * minimum valid size when carried in a non-S1G PPDU + * @virtual_map: Partial Virtual Bitmap * * This structure represents the payload of the "TIM element" as - * described in IEEE Std 802.11-2020 section 9.4.2.5. + * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this + * definition is only applicable when the element is carried in a + * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap + * Control and Partial Virtual Bitmap may not be present. */ struct ieee80211_tim_ie { u8 dtim_count; u8 dtim_period; u8 bitmap_ctrl; - /* variable size: 1 - 251 bytes */ - u8 virtual_map[1]; + union { + u8 required_octet; + DECLARE_FLEX_ARRAY(u8, virtual_map); + }; } __packed; /** @@ -1239,6 +1253,30 @@ struct ieee80211_twt_setup { u8 params[]; } __packed; +#define IEEE80211_TTLM_MAX_CNT 2 +#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03 +#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04 +#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08 +#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10 +#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20 + +#define IEEE80211_TTLM_DIRECTION_DOWN 0 +#define IEEE80211_TTLM_DIRECTION_UP 1 +#define IEEE80211_TTLM_DIRECTION_BOTH 2 + +/** + * struct ieee80211_ttlm_elem - TID-To-Link Mapping element + * + * Defined in section 9.4.2.314 in P802.11be_D4 + * + * @control: the first part of control field + * @optional: the second part of control field + */ +struct ieee80211_ttlm_elem { + u8 control; + u8 optional[]; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1674,6 +1712,8 @@ struct ieee80211_mcs_info { #define IEEE80211_HT_MCS_TX_MAX_STREAMS 4 #define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10 +#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3))) + /* * 802.11n D5.0 20.3.5 / 20.6 says: * - indices 0 to 7 and 32 are single spatial stream @@ -2680,6 +2720,7 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) #define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 #define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 +#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2 /** * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field @@ -2790,12 +2831,14 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) static inline const struct ieee80211_he_6ghz_oper * ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) { - const u8 *ret = (const void *)&he_oper->optional; + const u8 *ret; u32 he_oper_params; if (!he_oper) return NULL; + ret = (const void *)&he_oper->optional; + he_oper_params = le32_to_cpu(he_oper->he_oper_params); if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)) @@ -3132,6 +3175,28 @@ ieee80211_eht_oper_size_ok(const u8 *data, u8 len) return len >= needed; } +#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1) + +struct ieee80211_bandwidth_indication { + u8 params; + struct ieee80211_eht_operation_info info; +} __packed; + +static inline bool +ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len) +{ + const struct ieee80211_bandwidth_indication *bwi = (const void *)data; + + if (len < sizeof(*bwi)) + return false; + + if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT && + len < sizeof(*bwi) + 2) + return false; + + return true; +} + #define LISTEN_INT_USF GENMASK(15, 14) #define LISTEN_INT_UI GENMASK(13, 0) @@ -3589,6 +3654,8 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_EHT_OPERATION = 106, WLAN_EID_EXT_EHT_MULTI_LINK = 107, WLAN_EID_EXT_EHT_CAPABILITY = 108, + WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109, + WLAN_EID_EXT_BANDWIDTH_INDICATION = 135, }; /* Action category code */ @@ -4356,6 +4423,36 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, } /** + * ieee80211_is_protected_dual_of_public_action - check if skb contains a + * protected dual of public action management frame + * @skb: the skb containing the frame, length will be checked + * + * Return: true if the skb contains a protected dual of public action + * management frame, false otherwise. + */ +static inline bool +ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb) +{ + u8 action; + + if (!ieee80211_is_public_action((void *)skb->data, skb->len) || + skb->len < IEEE80211_MIN_ACTION_SIZE + 1) + return false; + + action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE); + + return action != WLAN_PUB_ACTION_20_40_BSS_COEX && + action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN && + action != WLAN_PUB_ACTION_MSMT_PILOT && + action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES && + action != WLAN_PUB_ACTION_LOC_TRACK_NOTI && + action != WLAN_PUB_ACTION_FTM_REQUEST && + action != WLAN_PUB_ACTION_FTM_RESPONSE && + action != WLAN_PUB_ACTION_FILS_DISCOVERY && + action != WLAN_PUB_ACTION_VENDOR_SPECIFIC; +} + +/** * _ieee80211_is_group_privacy_action - check if frame is a group addressed * privacy action frame * @hdr: the frame @@ -4426,12 +4523,11 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, /** * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) * @skb: the skb containing the frame, length will not be checked * * This function assumes the frame is a data frame, and that the network header * is in the correct place.
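 *
 * An illustrative call (hypothetical caller; note that this patch drops
 * the old @hdr_size parameter):
 *
 *	int action = ieee80211_get_tdls_action(skb);
 *
 *	if (action < 0)
 *		return false;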
*/ -static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) +static inline int ieee80211_get_tdls_action(struct sk_buff *skb) { if (!skb_is_nonlinear(skb) && skb->len > (skb_network_offset(skb) + 2)) { @@ -5125,6 +5221,39 @@ static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data, fixed + prof->sta_info_len - 1 <= len; } +static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len) +{ + const struct ieee80211_ttlm_elem *t2l = (const void *)data; + u8 control, fixed = sizeof(*t2l), elem_len = 0; + + if (len < fixed) + return false; + + control = t2l->control; + + if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT) + elem_len += 2; + if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT) + elem_len += 3; + + if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) { + u8 bm_size; + + elem_len += 1; + if (len < fixed + elem_len) + return false; + + if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE) + bm_size = 1; + else + bm_size = 2; + + elem_len += hweight8(t2l->optional[0]) * bm_size; + } + + return len >= fixed + elem_len; +} + #define for_each_mle_subelement(_elem, _data, _len) \ if (ieee80211_mle_size_ok(_data, _len)) \ for_each_element(_elem, \ diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 1b9b15a492fa..cdc684e04a2f 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -189,6 +189,8 @@ struct team { struct net_device *dev; /* associated netdevice */ struct team_pcpu_stats __percpu *pcpu_stats; + const struct header_ops *header_ops_cache; + struct mutex lock; /* used for overall locking, e.g. port lists write */ /* diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 3028af87716e..c1645c86eed9 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -540,7 +540,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); if (!eth_type_vlan(veth->h_vlan_proto)) - return -EINVAL; + return -ENODATA; *vlan_tci = ntohs(veth->h_vlan_TCI); return 0; @@ -561,7 +561,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, return 0; } else { *vlan_tci = 0; - return -EINVAL; + return -ENODATA; } } diff --git a/include/linux/igmp.h b/include/linux/igmp.h index ebf4349a53af..5171231f70a8 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -39,7 +39,7 @@ struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; - __be32 sl_addr[]; + __be32 sl_addr[] __counted_by(sl_max); }; #define IP_SFBLOCK 10 /* allocate this many at once */ diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h index 52620e5b8052..b7904992d561 100644 --- a/include/linux/iio/adc/adi-axi-adc.h +++ b/include/linux/iio/adc/adi-axi-adc.h @@ -41,6 +41,7 @@ struct adi_axi_adc_chip_info { * @reg_access IIO debugfs_reg_access hook for the client ADC * @read_raw IIO read_raw hook for the client ADC * @write_raw IIO write_raw hook for the client ADC + * @read_avail IIO read_avail hook for the client ADC */ struct adi_axi_adc_conv { const struct adi_axi_adc_chip_info *chip_info; @@ -54,6 +55,9 @@ struct adi_axi_adc_conv { int (*write_raw)(struct adi_axi_adc_conv *conv, struct iio_chan_spec const *chan, int val, int val2, long mask); + int (*read_avail)(struct adi_axi_adc_conv *conv, + struct iio_chan_spec const *chan, + const int **val, int *type, int *length, long mask); }; struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev, 
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 6564bdcdac66..18d3702fa95d 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -19,14 +19,12 @@ struct device; /** * enum iio_block_state - State of a struct iio_dma_buffer_block - * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed */ enum iio_block_state { - IIO_BLOCK_STATE_DEQUEUED, IIO_BLOCK_STATE_QUEUED, IIO_BLOCK_STATE_ACTIVE, IIO_BLOCK_STATE_DONE, @@ -73,12 +71,15 @@ struct iio_dma_buffer_block { * @active_block: Block being used in read() * @pos: Read offset in the active block * @block_size: Size of each block + * @next_dequeue: index of next block that will be dequeued */ struct iio_dma_buffer_queue_fileio { struct iio_dma_buffer_block *blocks[2]; struct iio_dma_buffer_block *active_block; size_t pos; size_t block_size; + + unsigned int next_dequeue; }; /** @@ -93,7 +94,6 @@ struct iio_dma_buffer_queue_fileio { * list and typically also a list of active blocks in the part that handles * the DMA controller * @incoming: List of buffers on the incoming queue - * @outgoing: List of buffers on the outgoing queue * @active: Whether the buffer is currently active * @fileio: FileIO state */ @@ -105,7 +105,6 @@ struct iio_dma_buffer_queue { struct mutex lock; spinlock_t list_lock; struct list_head incoming; - struct list_head outgoing; bool active; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 202e55b0a28b..c5b36d2c1e73 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -427,18 +427,14 @@ struct iio_trigger; /* forward declaration */ * @write_event_config: set if the event is enabled. * @read_event_value: read a configuration value associated with the event. * @write_event_value: write a configuration value for the event. + * @read_event_label: function to request label name for a specified label, + * for better event identification. * @validate_trigger: function to validate the trigger when the * current trigger gets changed. * @update_scan_mode: function to configure device and scan buffer when * channels have changed * @debugfs_reg_access: function to read or write register value of device - * @of_xlate: function pointer to obtain channel specifier index. - * When #iio-cells is greater than '0', the driver could - * provide a custom of_xlate function that reads the - * *args* and returns the appropriate index in registered - * IIO channels array. * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index. - * Functionally the same as @of_xlate. 
* @hwfifo_set_watermark: function pointer to set the current hardware * fifo watermark level; see hwfifo_* entries in * Documentation/ABI/testing/sysfs-bus-iio for details on @@ -511,6 +507,12 @@ struct iio_info { enum iio_event_direction dir, enum iio_event_info info, int val, int val2); + int (*read_event_label)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + enum iio_event_type type, + enum iio_event_direction dir, + char *label); + int (*validate_trigger)(struct iio_dev *indio_dev, struct iio_trigger *trig); int (*update_scan_mode)(struct iio_dev *indio_dev, @@ -556,7 +558,9 @@ struct iio_buffer_setup_ops { * and owner * @buffer: [DRIVER] any buffer present * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux - * @available_scan_masks: [DRIVER] optional array of allowed bitmasks + * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the + * array in order of preference, the most preferred + * masks first. * @masklength: [INTERN] the length of the mask established from * channels * @active_scan_mask: [INTERN] union of all scan masks requested by buffers diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h index eff1e6b2595c..0f7fe7b522e3 100644 --- a/include/linux/iio/sw_device.h +++ b/include/linux/iio/sw_device.h @@ -51,9 +51,6 @@ void iio_unregister_sw_device_type(struct iio_sw_device_type *dt); struct iio_sw_device *iio_sw_device_create(const char *, const char *); void iio_sw_device_destroy(struct iio_sw_device *); -int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt); -void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt); - static inline void iio_swd_group_init_type_name(struct iio_sw_device *d, const char *name, diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h index 47de2443e984..bc77f88df303 100644 --- a/include/linux/iio/sw_trigger.h +++ b/include/linux/iio/sw_trigger.h @@ -51,9 +51,6 @@ void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt); struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *); void iio_sw_trigger_destroy(struct iio_sw_trigger *); -int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt); -void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt); - static inline void iio_swt_group_init_type_name(struct iio_sw_trigger *t, const char *name, diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 117bde7d6ad7..d89982c98368 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -68,6 +68,7 @@ enum iio_chan_info_enum { IIO_CHAN_INFO_THERMOCOUPLE_TYPE, IIO_CHAN_INFO_CALIBAMBIENT, IIO_CHAN_INFO_ZEROPOINT, + IIO_CHAN_INFO_TROUGH, }; #endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h index c1c76a70a6ce..adb83a42a6b9 100644 --- a/include/linux/indirect_call_wrapper.h +++ b/include/linux/indirect_call_wrapper.h @@ -11,7 +11,7 @@ * @__VA_ARGS__: arguments for @f * * Avoid retpoline overhead for known builtin, checking @f vs each of them and - * eventually invoking directly the builtin function. The functions are check + * eventually invoking directly the builtin function. The functions are checked * in the given order. Fallback to the indirect call. */ #define INDIRECT_CALL_1(f, f1, ...) 
\ diff --git a/include/linux/init.h b/include/linux/init.h index 266c3e1640d4..3fa3f6241350 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -89,9 +89,6 @@ __latent_entropy #define __meminitdata __section(".meminit.data") #define __meminitconst __section(".meminit.rodata") -#define __memexit __section(".memexit.text") __exitused __cold notrace -#define __memexitdata __section(".memexit.data") -#define __memexitconst __section(".memexit.rodata") /* For assembly routines */ #define __HEAD .section ".head.text","ax" @@ -182,6 +179,13 @@ extern void (*late_time_init)(void); extern bool initcall_debug; +#ifdef MODULE +extern struct module __this_module; +#define THIS_MODULE (&__this_module) +#else +#define THIS_MODULE ((struct module *)0) +#endif + #endif #ifndef MODULE diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 40fc5813cf93..bccb3f1f6262 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -37,13 +37,6 @@ extern struct cred init_cred; #define INIT_TASK_COMM "swapper" -/* Attach to the init_task data structure for proper alignment */ -#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK -#define __init_task_data __section(".data..init_task") -#else -#define __init_task_data /**/ -#endif - /* Attach to the thread_info data structure for proper alignment */ #define __init_thread_info __section(".data..init_thread_info") diff --git a/include/linux/input.h b/include/linux/input.h index 49790c1bd2c4..de6503c0edb8 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -562,7 +562,7 @@ struct ff_device { int max_effects; struct ff_effect *effects; - struct file *effect_owners[]; + struct file *effect_owners[] __counted_by(max_effects); }; int input_ff_create(struct input_dev *dev, unsigned int max_effects); diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h index 5fba52a56cd6..5705d5de3aea 100644 --- a/include/linux/input/as5011.h +++ b/include/linux/input/as5011.h @@ -7,7 +7,6 @@ */ struct as5011_platform_data { - unsigned int button_gpio; unsigned int axis_irq; /* irq number */ unsigned long axis_irqflags; char xp, xn; /* threshold for x axis */ diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index 3b8580bd33c1..2cf89a538b18 100644 --- a/include/linux/input/mt.h +++ b/include/linux/input/mt.h @@ -47,7 +47,7 @@ struct input_mt { unsigned int flags; unsigned int frame; int *red; - struct input_mt_slot slots[]; + struct input_mt_slot slots[] __counted_by(num_slots); }; static inline void input_mt_set_value(struct input_mt_slot *slot, diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h index d464ffb4db52..5192ae3f5ec1 100644 --- a/include/linux/input/navpoint.h +++ b/include/linux/input/navpoint.h @@ -5,5 +5,4 @@ struct navpoint_platform_data { int port; /* PXA SSP port for pxa_ssp_request() */ - int gpio; /* GPIO for power on/off */ }; diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h index cda1f706eaeb..aa0b3ffea935 100644 --- a/include/linux/instruction_pointer.h +++ b/include/linux/instruction_pointer.h @@ -2,7 +2,12 @@ #ifndef _LINUX_INSTRUCTION_POINTER_H #define _LINUX_INSTRUCTION_POINTER_H +#include <asm/linkage.h> + #define _RET_IP_ (unsigned long)__builtin_return_address(0) + +#ifndef _THIS_IP_ #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) +#endif #endif /* _LINUX_INSTRUCTION_POINTER_H */ diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index 
f45f13304add..771622650247 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -94,6 +94,9 @@ int ishtp_cl_link(struct ishtp_cl *cl); void ishtp_cl_unlink(struct ishtp_cl *cl); int ishtp_cl_disconnect(struct ishtp_cl *cl); int ishtp_cl_connect(struct ishtp_cl *cl); +int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid, + int tx_size, int rx_size, bool reset); +void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset); int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length); int ishtp_cl_flush_queues(struct ishtp_cl *cl); int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb); diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index 04d937ad4dc4..a3529b962be6 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -6,6 +6,25 @@ #ifndef _INTEL_TPMI_H_ #define _INTEL_TPMI_H_ +#include <linux/bitfield.h> + +#define TPMI_VERSION_INVALID 0xff +#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val) +#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val) + +/* + * List of supported TPMI IDs. + * Some TPMI IDs are not used by Linux, so the numbers are not consecutive. + */ +enum intel_tpmi_id { + TPMI_ID_RAPL = 0, /* Running Average Power Limit */ + TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */ + TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */ + TPMI_ID_SST = 5, /* Speed Select Technology */ + TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */ + TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ +}; + /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance * @package_id: CPU Package id @@ -26,7 +45,6 @@ struct intel_tpmi_plat_info { struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev); struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index); int tpmi_get_resource_count(struct auxiliary_device *auxdev); - -int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, int *locked, - int *disabled); +int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked, + bool *write_blocked); #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a92bce40b04b..76121c2bb4f8 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -566,11 +566,15 @@ enum * * _ RCU: * 1) rcutree_migrate_callbacks() migrates the queue. - * 2) rcu_report_dead() reports the final quiescent states. + * 2) rcutree_report_cpu_dead() reports the final quiescent states. * * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue + * + * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue */ -#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ)) +#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\ + BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ)) + /* map softirq index to softirq name. update 'softirq_to_name' in * kernel/softirq.c when adding a new softirq. diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 1b7a44b35616..86cf1f7ae389 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -100,6 +100,30 @@ struct io_pgtable_cfg { const struct iommu_flush_ops *tlb; struct device *iommu_dev; + /** + * @alloc: Custom page allocator. + * + * Optional hook used to allocate page tables. If this function is NULL, + * @free must be NULL too.
+ * + * Memory returned should be zeroed and suitable for dma_map_single() and + * virt_to_phys(). + * + * Not all formats support custom page allocators. Before considering + * passing a non-NULL value, make sure the chosen page format supports + * this feature. + */ + void *(*alloc)(void *cookie, size_t size, gfp_t gfp); + + /** + * @free: Custom page de-allocator. + * + * Optional hook used to free page tables allocated with the @alloc + * hook. Must be non-NULL if @alloc is not NULL, must be NULL + * otherwise. + */ + void (*free)(void *cookie, void *pages, size_t size); + /* Low-level data specific to the table format */ union { struct { @@ -166,6 +190,10 @@ struct io_pgtable_ops { struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); + int (*read_and_clear_dirty)(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); }; /** @@ -238,15 +266,25 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop, } /** + * enum io_pgtable_caps - IO page table backend capabilities. + */ +enum io_pgtable_caps { + /** @IO_PGTABLE_CAP_CUSTOM_ALLOCATOR: Backend accepts custom page table allocators. */ + IO_PGTABLE_CAP_CUSTOM_ALLOCATOR = BIT(0), +}; + +/** * struct io_pgtable_init_fns - Alloc/free a set of page tables for a * particular format. * * @alloc: Allocate a set of page tables described by cfg. * @free: Free the page tables associated with iop. + * @caps: Combination of @io_pgtable_caps flags encoding the backend capabilities. */ struct io_pgtable_init_fns { struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie); void (*free)(struct io_pgtable *iop); + u32 caps; }; extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns; diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 106cdc55ff3b..68ed6697fece 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -6,63 +6,13 @@ #include <linux/xarray.h> #include <uapi/linux/io_uring.h> -enum io_uring_cmd_flags { - IO_URING_F_COMPLETE_DEFER = 1, - IO_URING_F_UNLOCKED = 2, - /* the request is executed from poll, it should not be freed */ - IO_URING_F_MULTISHOT = 4, - /* executed by io-wq */ - IO_URING_F_IOWQ = 8, - /* int's last bit, sign checks are usually faster than a bit test */ - IO_URING_F_NONBLOCK = INT_MIN, - - /* ctx state flags, for URING_CMD */ - IO_URING_F_SQE128 = (1 << 8), - IO_URING_F_CQE32 = (1 << 9), - IO_URING_F_IOPOLL = (1 << 10), -}; - -struct io_uring_cmd { - struct file *file; - const struct io_uring_sqe *sqe; - union { - /* callback to defer completions to task context */ - void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned); - /* used for polled completion */ - void *cookie; - }; - u32 cmd_op; - u32 flags; - u8 pdu[32]; /* available inline for free use */ -}; - -static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) -{ - return sqe->cmd; -} - #if defined(CONFIG_IO_URING) -int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, - struct iov_iter *iter, void *ioucmd); -void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2, - unsigned issue_flags); -struct sock *io_uring_get_socket(struct file *file); void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); void io_uring_unreg_ringfd(void); const char *io_uring_get_opcode(u8 opcode); -void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, - void (*task_work_cb)(struct io_uring_cmd *, unsigned), - unsigned flags); 
-/* users should follow semantics of IOU_F_TWQ_LAZY_WAKE */ -void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd, - void (*task_work_cb)(struct io_uring_cmd *, unsigned)); - -static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, - void (*task_work_cb)(struct io_uring_cmd *, unsigned)) -{ - __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0); -} +int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags); +bool io_is_uring_fops(struct file *file); static inline void io_uring_files_cancel(void) { @@ -81,29 +31,7 @@ static inline void io_uring_free(struct task_struct *tsk) if (tsk->io_uring) __io_uring_free(tsk); } -int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags); #else -static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, - struct iov_iter *iter, void *ioucmd) -{ - return -EOPNOTSUPP; -} -static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, - ssize_t ret2, unsigned issue_flags) -{ -} -static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, - void (*task_work_cb)(struct io_uring_cmd *, unsigned)) -{ -} -static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd, - void (*task_work_cb)(struct io_uring_cmd *, unsigned)) -{ -} -static inline struct sock *io_uring_get_socket(struct file *file) -{ - return NULL; -} static inline void io_uring_task_cancel(void) { } @@ -122,6 +50,10 @@ static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd, { return -EOPNOTSUPP; } +static inline bool io_is_uring_fops(struct file *file) +{ + return false; +} #endif #endif diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h new file mode 100644 index 000000000000..e453a997c060 --- /dev/null +++ b/include/linux/io_uring/cmd.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_IO_URING_CMD_H +#define _LINUX_IO_URING_CMD_H + +#include <uapi/linux/io_uring.h> +#include <linux/io_uring_types.h> + +/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */ +#define IORING_URING_CMD_CANCELABLE (1U << 30) + +struct io_uring_cmd { + struct file *file; + const struct io_uring_sqe *sqe; + /* callback to defer completions to task context */ + void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned); + u32 cmd_op; + u32 flags; + u8 pdu[32]; /* available inline for free use */ +}; + +static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) +{ + return sqe->cmd; +} + +#if defined(CONFIG_IO_URING) +int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, + struct iov_iter *iter, void *ioucmd); +void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2, + unsigned issue_flags); +void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, + void (*task_work_cb)(struct io_uring_cmd *, unsigned), + unsigned flags); + +void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, + unsigned int issue_flags); + +#else +static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, + struct iov_iter *iter, void *ioucmd) +{ + return -EOPNOTSUPP; +} +static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, + ssize_t ret2, unsigned issue_flags) +{ +} +static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, + void (*task_work_cb)(struct io_uring_cmd *, unsigned), + unsigned flags) +{ +} +static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ +} +#endif + +/* users must 
follow the IOU_F_TWQ_LAZY_WAKE semantics */ +static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd, + void (*task_work_cb)(struct io_uring_cmd *, unsigned)) +{ + __io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE); +} + +static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, + void (*task_work_cb)(struct io_uring_cmd *, unsigned)) +{ + __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0); +} + +static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd) +{ + return cmd_to_io_kiocb(cmd)->task; +} + +#endif /* _LINUX_IO_URING_CMD_H */ diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 13d19b9be9f4..854ad67a5f70 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -7,6 +7,37 @@ #include <linux/llist.h> #include <uapi/linux/io_uring.h> +enum { + /* + * A hint to not wake right away but delay until there are enough of + * tw's queued to match the number of CQEs the task is waiting for. + * + * Must not be used with requests generating more than one CQE. + * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set. + */ + IOU_F_TWQ_LAZY_WAKE = 1, +}; + +enum io_uring_cmd_flags { + IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, + /* the request is executed from poll, it should not be freed */ + IO_URING_F_MULTISHOT = 4, + /* executed by io-wq */ + IO_URING_F_IOWQ = 8, + /* int's last bit, sign checks are usually faster than a bit test */ + IO_URING_F_NONBLOCK = INT_MIN, + + /* ctx state flags, for URING_CMD */ + IO_URING_F_SQE128 = (1 << 8), + IO_URING_F_CQE32 = (1 << 9), + IO_URING_F_IOPOLL = (1 << 10), + + /* set when uring wants to cancel a previously issued command */ + IO_URING_F_CANCEL = (1 << 11), + IO_URING_F_COMPAT = (1 << 12), +}; + struct io_wq_work_node { struct io_wq_work_node *next; }; @@ -265,6 +296,12 @@ struct io_ring_ctx { */ struct io_wq_work_list iopoll_list; bool poll_multi_queue; + + /* + * Any cancelable uring_cmd is added to this list in + * ->uring_cmd() by io_uring_cmd_insert_cancelable() + */ + struct hlist_head cancelable_uring_cmd; } ____cacheline_aligned_in_smp; struct { @@ -313,6 +350,13 @@ struct io_ring_ctx { struct list_head cq_overflow_list; struct io_hash_table cancel_table; + struct hlist_head waitid_list; + +#ifdef CONFIG_FUTEX + struct hlist_head futex_list; + struct io_alloc_cache futex_cache; +#endif + const struct cred *sq_creds; /* cred used for __io_sq_thread() */ struct io_sq_data *sq_data; /* if using sq thread polling */ @@ -327,6 +371,9 @@ struct io_ring_ctx { struct list_head io_buffers_cache; + /* deferred free list, protected by ->uring_lock */ + struct hlist_head io_buf_list; + /* Keep this last, we don't need it for the fast path */ struct wait_queue_head poll_wq; struct io_restriction restrictions; @@ -342,11 +389,6 @@ struct io_ring_ctx { struct wait_queue_head rsrc_quiesce_wq; unsigned rsrc_quiesce; - struct list_head io_buffers_pages; - - #if defined(CONFIG_UNIX) - struct socket *ring_sock; - #endif /* hashed buffered write serialization */ struct io_wq_hash *hash_map; @@ -420,6 +462,7 @@ enum { /* keep async read/write and isreg together and in order */ REQ_F_SUPPORT_NOWAIT_BIT, REQ_F_ISREG_BIT, + REQ_F_POLL_NO_LAZY_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -487,6 +530,8 @@ enum { REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT), /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT), + /* don't use lazy poll wake for this request */ + REQ_F_POLL_NO_LAZY = BIT(REQ_F_POLL_NO_LAZY_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
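A minimal sketch of the deferred-completion pattern the relocated uring_cmd helpers support (the handler name is hypothetical):

	static void my_uring_cmd_tw(struct io_uring_cmd *cmd,
				    unsigned int issue_flags)
	{
		io_uring_cmd_done(cmd, 0, 0, issue_flags);
	}

	/* from a driver's ->uring_cmd(): punt completion to task context */
	io_uring_cmd_complete_in_task(cmd, my_uring_cmd_tw);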
diff --git a/include/linux/iommu.h index c50a769d569a..1ea2a820e1eb 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,6 +13,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/of.h> +#include <linux/iova_bitmap.h> #include <uapi/linux/iommu.h> #define IOMMU_READ (1 << 0) @@ -37,6 +38,7 @@ struct bus_type; struct device; struct iommu_domain; struct iommu_domain_ops; +struct iommu_dirty_ops; struct notifier_block; struct iommu_sva; struct iommu_fault_event; @@ -64,6 +66,10 @@ struct iommu_domain_geometry { #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */ #define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */ +#define __IOMMU_DOMAIN_PLATFORM (1U << 5) + +#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested + on a stage-2 translation */ #define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ /* @@ -81,6 +87,8 @@ struct iommu_domain_geometry { * invalidation. * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses * represented by mm_struct's. + * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own + * dma_api stuff. Do not use in new drivers. */ #define IOMMU_DOMAIN_BLOCKED (0U) #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) @@ -91,10 +99,14 @@ struct iommu_domain_geometry { __IOMMU_DOMAIN_DMA_API | \ __IOMMU_DOMAIN_DMA_FQ) #define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA) +#define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM) +#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED) struct iommu_domain { unsigned type; const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + const struct iommu_ops *owner; /* Whose domain_alloc we came from */ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; @@ -109,6 +121,11 @@ struct iommu_domain { struct { /* IOMMU_DOMAIN_SVA */ struct mm_struct *mm; int users; + /* + * Next iommu_domain in mm->iommu_mm->sva-domains list + * protected by iommu_sva_lock. + */ + struct list_head next; }; }; }; @@ -133,6 +150,7 @@ enum iommu_cap { * usefully support the non-strict DMA flush queue. */ IOMMU_CAP_DEFERRED_FLUSH, + IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */ }; /* These are the possible reserved region types */ @@ -228,20 +246,183 @@ struct iommu_iotlb_gather { }; /** + * struct iommu_dirty_bitmap - Dirty IOVA bitmap state + * @bitmap: IOVA bitmap + * @gather: Range information for a pending IOTLB flush + */ +struct iommu_dirty_bitmap { + struct iova_bitmap *bitmap; + struct iommu_iotlb_gather *gather; +}; + +/* Read but do not clear any dirty bits */ +#define IOMMU_DIRTY_NO_CLEAR (1 << 0) + +/** + * struct iommu_dirty_ops - domain specific dirty tracking operations + * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain + * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled + * into a bitmap, with a bit represented as a page. + * Reads the dirty PTE bits and clears them from the IO + * pagetables. + */ +struct iommu_dirty_ops { + int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled); + int (*read_and_clear_dirty)(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); +};
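An illustrative skeleton of a driver-side ->read_and_clear_dirty(), built on the helpers this patch adds (the page-table walk itself is elided):

	static int my_read_and_clear_dirty(struct iommu_domain *domain,
					   unsigned long iova, size_t size,
					   unsigned long flags,
					   struct iommu_dirty_bitmap *dirty)
	{
		/* for each dirty PTE found while walking [iova, iova + size): */
		iommu_dirty_bitmap_record(dirty, iova, SZ_4K);

		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
			/* clear the PTE's dirty bit here */
		}
		return 0;
	}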
+ +/** + * struct iommu_user_data - iommu driver specific user space data info + * @type: The data type of the user buffer + * @uptr: Pointer to the user buffer for copy_from_user() + * @len: The length of the user buffer in bytes + * + * The user space data follows the uAPI defined in include/uapi/linux/iommufd.h; + * @type, @uptr and @len should be just copied from an iommufd core uAPI struct. + */ +struct iommu_user_data { + unsigned int type; + void __user *uptr; + size_t len; +}; + +/** + * struct iommu_user_data_array - iommu driver specific user space data array + * @type: The data type of all the entries in the user buffer array + * @uptr: Pointer to the user buffer array + * @entry_len: The fixed-width length of an entry in the array, in bytes + * @entry_num: The number of total entries in the array + * + * The user buffer includes an array of requests with format defined in + * include/uapi/linux/iommufd.h + */ +struct iommu_user_data_array { + unsigned int type; + void __user *uptr; + size_t entry_len; + u32 entry_num; +}; + +/** + * __iommu_copy_struct_from_user - Copy iommu driver specific user space data + * @dst_data: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @src_data: Pointer to a struct iommu_user_data for user space data info + * @data_type: The data type of the @dst_data. Must match with @src_data.type + * @data_len: Length of current user data structure, i.e. sizeof(struct _dst) + * @min_len: Initial length of user data structure for backward compatibility. + * This should be offsetofend using the last member in the user data + * struct that was initially added to include/uapi/linux/iommufd.h + */ +static inline int __iommu_copy_struct_from_user( + void *dst_data, const struct iommu_user_data *src_data, + unsigned int data_type, size_t data_len, size_t min_len) +{ + if (src_data->type != data_type) + return -EINVAL; + if (WARN_ON(!dst_data || !src_data)) + return -EINVAL; + if (src_data->len < min_len || data_len < src_data->len) + return -EINVAL; + return copy_struct_from_user(dst_data, data_len, src_data->uptr, + src_data->len); +} + +/** + * iommu_copy_struct_from_user - Copy iommu driver specific user space data + * @kdst: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @user_data: Pointer to a struct iommu_user_data for user space data info + * @data_type: The data type of the @kdst. Must match with @user_data->type + * @min_last: The last member of the data structure @kdst points to in the + * initial version. + * Return 0 for success, otherwise -error. + */ +#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \ + __iommu_copy_struct_from_user(kdst, user_data, data_type, \ + sizeof(*kdst), \ + offsetofend(typeof(*kdst), min_last))
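How a driver might consume the copy helper while staying compatible with older user structs (the struct, type constant and field names are hypothetical):

	struct iommu_hwpt_my_data {
		__u32 flags;	/* present since the first uAPI version */
		__u32 extra;	/* added in a later revision */
	};

	struct iommu_hwpt_my_data data;
	int ret;

	/* accepts user buffers that end at 'flags' or at 'extra' */
	ret = iommu_copy_struct_from_user(&data, user_data,
					  IOMMU_HWPT_DATA_MY, flags);
	if (ret)
		return ret;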
+ +/** + * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space + * data from an iommu_user_data_array + * @dst_data: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @src_array: Pointer to a struct iommu_user_data_array for a user space array + * @data_type: The data type of the @dst_data. Must match with @src_array.type + * @index: Index to the location in the array to copy user data from + * @data_len: Length of current user data structure, i.e. sizeof(struct _dst) + * @min_len: Initial length of user data structure for backward compatibility. + * This should be offsetofend using the last member in the user data + * struct that was initially added to include/uapi/linux/iommufd.h + */ +static inline int __iommu_copy_struct_from_user_array( + void *dst_data, const struct iommu_user_data_array *src_array, + unsigned int data_type, unsigned int index, size_t data_len, + size_t min_len) +{ + struct iommu_user_data src_data; + + if (WARN_ON(!src_array || index >= src_array->entry_num)) + return -EINVAL; + if (!src_array->entry_num) + return -EINVAL; + src_data.uptr = src_array->uptr + src_array->entry_len * index; + src_data.len = src_array->entry_len; + src_data.type = src_array->type; + + return __iommu_copy_struct_from_user(dst_data, &src_data, data_type, + data_len, min_len); +} + +/** + * iommu_copy_struct_from_user_array - Copy iommu driver specific user space + * data from an iommu_user_data_array + * @kdst: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @user_array: Pointer to a struct iommu_user_data_array for a user space + * array + * @data_type: The data type of the @kdst. Must match with @user_array->type + * @index: Index to the location in the array to copy user data from + * @min_last: The last member of the data structure @kdst points to in the + * initial version. + * Return 0 for success, otherwise -error. + */ +#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \ + min_last) \ + __iommu_copy_struct_from_user_array( \ + kdst, user_array, data_type, index, sizeof(*(kdst)), \ + offsetofend(typeof(*(kdst)), min_last)) + +/** * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @hw_info: report iommu hardware information. The data buffer returned by this * op is allocated in the iommu driver and freed by the caller after * use. The information type is one of enum iommu_hw_info_type defined * in include/uapi/linux/iommufd.h. - * @domain_alloc: allocate iommu domain + * @domain_alloc: allocate and return an iommu domain on success, otherwise + * return NULL. The domain is not fully initialized until + * its caller, iommu_domain_alloc(), returns. + * @domain_alloc_user: Allocate an iommu domain corresponding to the input + * parameters as defined in include/uapi/linux/iommufd.h. + * Unlike @domain_alloc, it is called only by IOMMUFD and + * must fully initialize the new domain before return. + * Upon success, if the @user_data is valid and the @parent + * points to a kernel-managed domain, the new domain must be + * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be + * NULL while the @user_data can be optionally provided; the + * new domain must support __IOMMU_DOMAIN_PAGING. + * Upon failure, ERR_PTR must be returned. + * @domain_alloc_paging: Allocate an iommu_domain that can be used for + * UNMANAGED, DMA, and DMA_FQ domain types. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain - * @set_platform_dma_ops: Returning control back to the platform DMA ops. This op - * is to support old IOMMU drivers, new drivers should use - * default domains, and the common IOMMU DMA ops.
* @device_group: find iommu group for a particular device * @get_resv_regions: Request list of reserved regions for a device * @of_xlate: add OF master IDs to iommu grouping @@ -260,6 +441,13 @@ struct iommu_iotlb_gather { * will be blocked by the hardware. * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops + * @identity_domain: An always available, always attachable identity + * translation. + * @blocked_domain: An always available, always attachable blocking + * translation. + * @default_domain: If not NULL this will always be set as the default domain. + * This should be an IDENTITY/BLOCKED/PLATFORM domain. + * Do not use in new drivers. */ struct iommu_ops { bool (*capable)(struct device *dev, enum iommu_cap); @@ -267,11 +455,14 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); + struct iommu_domain *(*domain_alloc_user)( + struct device *dev, u32 flags, struct iommu_domain *parent, + const struct iommu_user_data *user_data); + struct iommu_domain *(*domain_alloc_paging)(struct device *dev); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); - void (*set_platform_dma_ops)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); /* Request/Free a list of reserved regions for a device */ @@ -294,6 +485,9 @@ struct iommu_ops { const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; + struct iommu_domain *identity_domain; + struct iommu_domain *blocked_domain; + struct iommu_domain *default_domain; }; /** @@ -312,15 +506,20 @@ struct iommu_ops { * * ENODEV - device specific errors, not able to be attached * * <others> - treated as ENODEV by the caller. Use is discouraged * @set_dev_pasid: set an iommu domain to a pasid of device - * @map: map a physically contiguous memory region to an iommu domain * @map_pages: map a physically contiguous set of pages of the same size to * an iommu domain. - * @unmap: unmap a physically contiguous memory region from an iommu domain * @unmap_pages: unmap a number of pages of the same size from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain * @iotlb_sync_map: Sync mappings created recently using @map to the hardware * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue + * @cache_invalidate_user: Flush hardware cache for user space IO page table. + * The @domain must be IOMMU_DOMAIN_NESTED. The @array + * passes in the cache invalidation requests, in form + * of a driver data structure. The driver must update + * array->entry_num to report the number of handled + * invalidation requests. 
The driver data structure + * must be defined in include/uapi/linux/iommufd.h * @iova_to_phys: translate iova to physical address * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE, * including no-snoop TLPs on PCIe or other platform @@ -334,22 +533,20 @@ struct iommu_domain_ops { int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); - int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp); int (*map_pages)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped); - size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *iotlb_gather); size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, - size_t size); + int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, + size_t size); void (*iotlb_sync)(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather); + int (*cache_invalidate_user)(struct iommu_domain *domain, + struct iommu_user_data_array *array); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); @@ -368,6 +565,7 @@ struct iommu_domain_ops { * @list: Used by the iommu-core to keep a list of registered iommus * @ops: iommu-ops for talking to this iommu * @dev: struct device for sysfs handling + * @singleton_group: Used internally for drivers that have only one group * @max_pasids: number of supported PASIDs */ struct iommu_device { @@ -375,6 +573,7 @@ struct iommu_device { const struct iommu_ops *ops; struct fwnode_handle *fwnode; struct device *dev; + struct iommu_group *singleton_group; u32 max_pasids; }; @@ -418,6 +617,7 @@ struct iommu_fault_param { * @attach_deferred: the dma domain attachment is deferred * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs * @require_direct: device requires IOMMU_RESV_DIRECT regions + * @shadow_on_flush: IOTLB flushes are used to sync shadow tables * * TODO: migrate other per device data pointers under iommu_dev_data, e.g. 
* struct iommu_group *iommu_group; @@ -433,6 +633,7 @@ struct dev_iommu { u32 attach_deferred:1; u32 pci_32bit_workaround:1; u32 require_direct:1; + u32 shadow_on_flush:1; }; int iommu_device_register(struct iommu_device *iommu, @@ -632,12 +833,35 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return gather && gather->queued; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ + if (gather) + iommu_iotlb_gather_init(gather); + + dirty->bitmap = bitmap; + dirty->gather = gather; +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ + if (dirty->bitmap) + iova_bitmap_set(dirty->bitmap, iova, length); + + if (dirty->gather) + iommu_iotlb_gather_add_range(dirty->gather, iova, length); +} + /* PCI device grouping function */ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ extern struct iommu_group *generic_device_group(struct device *dev); /* FSL-MC device grouping function */ struct iommu_group *fsl_mc_device_group(struct device *dev); +extern struct iommu_group *generic_single_device_group(struct device *dev); /** * struct iommu_fwspec - per-device IOMMU instance data @@ -670,6 +894,11 @@ struct iommu_sva { struct iommu_domain *domain; }; +struct iommu_mm_data { + u32 pasid; + struct list_head sva_domains; +}; + int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops); void iommu_fwspec_free(struct device *dev); @@ -698,11 +927,9 @@ static inline void *dev_iommu_priv_get(struct device *dev) return NULL; } -static inline void dev_iommu_priv_set(struct device *dev, void *priv) -{ - dev->iommu->priv = priv; -} +void dev_iommu_priv_set(struct device *dev, void *priv); +extern struct mutex iommu_probe_device_lock; int iommu_probe_device(struct device *dev); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); @@ -737,6 +964,8 @@ struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; struct iommu_iotlb_gather {}; +struct iommu_dirty_bitmap {}; +struct iommu_dirty_ops {}; static inline bool iommu_present(const struct bus_type *bus) { @@ -969,6 +1198,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return false; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } @@ -1109,7 +1350,7 @@ static inline void iommu_free_global_pasid(ioasid_t pasid) {} * Creates a mapping at @iova for the buffer described by a scatterlist * stored in the given sg_table object in the provided IOMMU domain. 
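 *
 * A minimal illustrative call (assumes a populated sg_table and an
 * attached domain; the variable names are hypothetical):
 *
 *	ssize_t ret = iommu_map_sgtable(domain, iova, &sgt,
 *					IOMMU_READ | IOMMU_WRITE);
 *	if (ret < 0)
 *		return ret;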
*/ -static inline size_t iommu_map_sgtable(struct iommu_domain *domain, +static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain, unsigned long iova, struct sg_table *sgt, int prot) { return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot, @@ -1180,15 +1421,33 @@ static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream return false; } -#ifdef CONFIG_IOMMU_SVA +#ifdef CONFIG_IOMMU_MM_DATA static inline void mm_pasid_init(struct mm_struct *mm) { - mm->pasid = IOMMU_PASID_INVALID; + /* + * During dup_mm(), a new mm will be memcpy'd from an old one and that makes + * the new mm and the old one point to the same iommu_mm instance. When either + * one of the two mms gets released, the iommu_mm instance is freed, leaving + * the other mm running into a use-after-free/double-free problem. To avoid + * the problem, zeroing the iommu_mm pointer of a new mm is needed here. + */ + mm->iommu_mm = NULL; } + static inline bool mm_valid_pasid(struct mm_struct *mm) { - return mm->pasid != IOMMU_PASID_INVALID; + return READ_ONCE(mm->iommu_mm); } + +static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) +{ + struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm); + + if (!iommu_mm) + return IOMMU_PASID_INVALID; + return iommu_mm->pasid; +} + void mm_pasid_drop(struct mm_struct *mm); struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm); @@ -1211,6 +1470,12 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) } static inline void mm_pasid_init(struct mm_struct *mm) {} static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; } + +static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) +{ + return IOMMU_PASID_INVALID; +} + static inline void mm_pasid_drop(struct mm_struct *mm) {} #endif /* CONFIG_IOMMU_SVA */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 25d768d48970..db7fe25f3370 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -229,7 +229,7 @@ static inline unsigned long resource_ext_type(const struct resource *res) return res->flags & IORESOURCE_EXT_TYPE_BITS; } /* True iff r1 completely contains r2 */ -static inline bool resource_contains(struct resource *r1, struct resource *r2) +static inline bool resource_contains(const struct resource *r1, const struct resource *r2) { if (resource_type(r1) != resource_type(r2)) return false; @@ -239,13 +239,13 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2) } /* True if any part of r1 overlaps r2 */ -static inline bool resource_overlaps(struct resource *r1, struct resource *r2) +static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2) { return r1->start <= r2->end && r1->end >= r2->start; } -static inline bool -resource_intersection(struct resource *r1, struct resource *r2, struct resource *r) +static inline bool resource_intersection(const struct resource *r1, const struct resource *r2, + struct resource *r) { if (!resource_overlaps(r1, r2)) return false; @@ -254,8 +254,8 @@ resource_intersection(struct resource *r1, struct resource *r2, struct resource return true; } -static inline bool -resource_union(struct resource *r1, struct resource *r2, struct resource *r) +static inline bool resource_union(const struct resource *r1, const struct resource *r2, + struct resource *r) { if (!resource_overlaps(r1, r2)) return false; @@ -331,6 +331,9 @@ extern int walk_system_ram_res(u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)); extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg, + int (*func)(struct resource *, void *)); +extern int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)); diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 7578d4f6a969..db1249cd9692 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -47,7 +47,30 @@ static inline int task_nice_ioclass(struct task_struct *task) } #ifdef CONFIG_BLOCK -int __get_task_ioprio(struct task_struct *p); +/* + * If the task has set an I/O priority, use that. Otherwise, return + * the default I/O priority. + * + * Expected to be called for current task or with task_lock() held to keep + * io_context stable. + */ +static inline int __get_task_ioprio(struct task_struct *p) +{ + struct io_context *ioc = p->io_context; + int prio; + + if (!ioc) + return IOPRIO_DEFAULT; + + if (p != current) + lockdep_assert_held(&p->alloc_lock); + + prio = ioc->ioprio; + if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE) + prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p), + task_nice_ioprio(p)); + return prio; +} #else static inline int __get_task_ioprio(struct task_struct *p) { diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index cb71aa616bd3..e3649a6563dd 100644 --- a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -168,9 +168,9 @@ struct iosys_map { * about the use of uninitialized variable. */ #define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \ - struct iosys_map copy = *map_; \ - iosys_map_incr(©, offset_); \ - copy; \ + struct iosys_map copy_ = *map_; \ + iosys_map_incr(©_, offset_); \ + copy_; \ }) /** @@ -391,14 +391,14 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * Returns: * The value read from the mapping. 
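 *
 * Illustrative use (hypothetical mapping of a little-endian register
 * block):
 *
 *	u32 val = iosys_map_rd(&map, 0x10, u32);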
*/ -#define iosys_map_rd(map__, offset__, type__) ({ \ - type__ val; \ - if ((map__)->is_iomem) { \ - __iosys_map_rd_io(val, (map__)->vaddr_iomem + (offset__), type__);\ - } else { \ - __iosys_map_rd_sys(val, (map__)->vaddr + (offset__), type__); \ - } \ - val; \ +#define iosys_map_rd(map__, offset__, type__) ({ \ + type__ val_; \ + if ((map__)->is_iomem) { \ + __iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \ + } else { \ + __iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \ + } \ + val_; \ }) /** @@ -413,20 +413,20 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * or if pointer may be unaligned (and problematic for the architecture * supported), use iosys_map_memcpy_to() */ -#define iosys_map_wr(map__, offset__, type__, val__) ({ \ - type__ val = (val__); \ - if ((map__)->is_iomem) { \ - __iosys_map_wr_io(val, (map__)->vaddr_iomem + (offset__), type__);\ - } else { \ - __iosys_map_wr_sys(val, (map__)->vaddr + (offset__), type__); \ - } \ +#define iosys_map_wr(map__, offset__, type__, val__) ({ \ + type__ val_ = (val__); \ + if ((map__)->is_iomem) { \ + __iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \ + } else { \ + __iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \ + } \ }) /** * iosys_map_rd_field - Read a member from a struct in the iosys_map * * @map__: The iosys_map structure - * @struct_offset__: Offset from the beggining of the map, where the struct + * @struct_offset__: Offset from the beginning of the map, where the struct * is located * @struct_type__: The struct describing the layout of the mapping * @field__: Member of the struct to read @@ -485,16 +485,16 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * The value read from the mapping. */ #define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \ - struct_type__ *s; \ + struct_type__ *s_; \ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \ - typeof(s->field__)); \ + typeof(s_->field__)); \ }) /** * iosys_map_wr_field - Write to a member of a struct in the iosys_map * * @map__: The iosys_map structure - * @struct_offset__: Offset from the beggining of the map, where the struct + * @struct_offset__: Offset from the beginning of the map, where the struct * is located * @struct_type__: The struct describing the layout of the mapping * @field__: Member of the struct to read @@ -508,9 +508,9 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * usage and memory layout. */ #define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \ - struct_type__ *s; \ + struct_type__ *s_; \ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \ - typeof(s->field__), val__); \ + typeof(s_->field__), val__); \ }) #endif /* __IOSYS_MAP_H__ */ diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h new file mode 100644 index 000000000000..270454a6703d --- /dev/null +++ b/include/linux/iov_iter.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* I/O iterator iteration building functions. + * + * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved. 
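The iosys_map_rd_field()/iosys_map_wr_field() macros above combine offsetof() with typeof(s_->field__) so a struct member can be read or written at the right offset and width without ever dereferencing a real struct pointer; s_ exists only to feed typeof(). A hypothetical use, assuming a device-defined layout and an already-initialized map:

struct fw_shmem {
	u32 magic;
	u32 doorbell;
};

static void ring_doorbell(struct iosys_map *map)
{
	u32 db = iosys_map_rd_field(map, 0, struct fw_shmem, doorbell);

	iosys_map_wr_field(map, 0, struct fw_shmem, doorbell, db + 1);
}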
+ * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_IOV_ITER_H +#define _LINUX_IOV_ITER_H + +#include <linux/uio.h> +#include <linux/bvec.h> + +typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len, + void *priv, void *priv2); +typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len, + void *priv, void *priv2); + +/* + * Handle ITER_UBUF. + */ +static __always_inline +size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_ustep_f step) +{ + void __user *base = iter->ubuf; + size_t progress = 0, remain; + + remain = step(base + iter->iov_offset, 0, len, priv, priv2); + progress = len - remain; + iter->iov_offset += progress; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_IOVEC. + */ +static __always_inline +size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_ustep_f step) +{ + const struct iovec *p = iter->__iov; + size_t progress = 0, skip = iter->iov_offset; + + do { + size_t remain, consumed; + size_t part = min(len, p->iov_len - skip); + + if (likely(part)) { + remain = step(p->iov_base + skip, progress, part, priv, priv2); + consumed = part - remain; + progress += consumed; + skip += consumed; + len -= consumed; + if (skip < p->iov_len) + break; + } + p++; + skip = 0; + } while (len); + + iter->nr_segs -= p - iter->__iov; + iter->__iov = p; + iter->iov_offset = skip; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_KVEC. + */ +static __always_inline +size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + const struct kvec *p = iter->kvec; + size_t progress = 0, skip = iter->iov_offset; + + do { + size_t remain, consumed; + size_t part = min(len, p->iov_len - skip); + + if (likely(part)) { + remain = step(p->iov_base + skip, progress, part, priv, priv2); + consumed = part - remain; + progress += consumed; + skip += consumed; + len -= consumed; + if (skip < p->iov_len) + break; + } + p++; + skip = 0; + } while (len); + + iter->nr_segs -= p - iter->kvec; + iter->kvec = p; + iter->iov_offset = skip; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_BVEC. + */ +static __always_inline +size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + const struct bio_vec *p = iter->bvec; + size_t progress = 0, skip = iter->iov_offset; + + do { + size_t remain, consumed; + size_t offset = p->bv_offset + skip, part; + void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE); + + part = min3(len, + (size_t)(p->bv_len - skip), + (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); + remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2); + kunmap_local(kaddr); + consumed = part - remain; + len -= consumed; + progress += consumed; + skip += consumed; + if (skip >= p->bv_len) { + skip = 0; + p++; + } + if (remain) + break; + } while (len); + + iter->nr_segs -= p - iter->bvec; + iter->bvec = p; + iter->iov_offset = skip; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_XARRAY. 
+ */ +static __always_inline +size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + struct folio *folio; + size_t progress = 0; + loff_t start = iter->xarray_start + iter->iov_offset; + pgoff_t index = start / PAGE_SIZE; + XA_STATE(xas, iter->xarray, index); + + rcu_read_lock(); + xas_for_each(&xas, folio, ULONG_MAX) { + size_t remain, consumed, offset, part, flen; + + if (xas_retry(&xas, folio)) + continue; + if (WARN_ON(xa_is_value(folio))) + break; + if (WARN_ON(folio_test_hugetlb(folio))) + break; + + offset = offset_in_folio(folio, start + progress); + flen = min(folio_size(folio) - offset, len); + + while (flen) { + void *base = kmap_local_folio(folio, offset); + + part = min_t(size_t, flen, + PAGE_SIZE - offset_in_page(offset)); + remain = step(base, progress, part, priv, priv2); + kunmap_local(base); + + consumed = part - remain; + progress += consumed; + len -= consumed; + + if (remain || len == 0) + goto out; + flen -= consumed; + offset += consumed; + } + } + +out: + rcu_read_unlock(); + iter->iov_offset += progress; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_DISCARD. + */ +static __always_inline +size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + size_t progress = len; + + iter->count -= progress; + return progress; +} + +/** + * iterate_and_advance2 - Iterate over an iterator + * @iter: The iterator to iterate over. + * @len: The amount to iterate over. + * @priv: Data for the step functions. + * @priv2: More data for the step functions. + * @ustep: Function for UBUF/IOVEC iterators; given __user addresses. + * @step: Function for other iterators; given kernel addresses. + * + * Iterate over the next part of an iterator, up to the specified length. The + * buffer is presented in segments, which for kernel iteration are broken up by + * physical pages and mapped, with the mapped address being presented. + * + * Two step functions, @step and @ustep, must be provided, one for handling + * mapped kernel addresses and the other is given user addresses which have the + * potential to fault since no pinning is performed. + * + * The step functions are passed the address and length of the segment, @priv, + * @priv2 and the amount of data so far iterated over (which can, for example, + * be added to @priv to point to the right part of a second buffer). The step + * functions should return the amount of the segment they didn't process (ie. 0 + * indicates complete processing). + * + * This function returns the amount of data processed (ie. 0 means nothing was + * processed and a value of @len means it was processed to completion). + */ +static __always_inline +size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv, + void *priv2, iov_ustep_f ustep, iov_step_f step) +{ + if (unlikely(iter->count < len)) + len = iter->count; + if (unlikely(!len)) + return 0; + + if (likely(iter_is_ubuf(iter))) + return iterate_ubuf(iter, len, priv, priv2, ustep); + if (likely(iter_is_iovec(iter))) + return iterate_iovec(iter, len, priv, priv2, ustep); + if (iov_iter_is_bvec(iter)) + return iterate_bvec(iter, len, priv, priv2, step); + if (iov_iter_is_kvec(iter)) + return iterate_kvec(iter, len, priv, priv2, step); + if (iov_iter_is_xarray(iter)) + return iterate_xarray(iter, len, priv, priv2, step); + return iterate_discard(iter, len, priv, priv2, step); +} + +/** + * iterate_and_advance - Iterate over an iterator + * @iter: The iterator to iterate over.
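The step-function contract documented above (return the part of the segment that was not processed, 0 meaning the whole segment was consumed) lines up exactly with copy_from_user(), which returns the number of uncopied bytes. A hedged sketch of a step pair that flattens an iterator into a buffer; the names are illustrative, not part of the patch:

static size_t flatten_step(void *iter_base, size_t progress, size_t len,
			   void *priv, void *priv2)
{
	/* Kernel-address segment: consume it all, nothing left over. */
	memcpy((char *)priv + progress, iter_base, len);
	return 0;
}

static size_t flatten_ustep(void __user *iter_base, size_t progress,
			    size_t len, void *priv, void *priv2)
{
	/*
	 * copy_from_user() returns the number of bytes left uncopied,
	 * which is exactly what a step function must report.
	 */
	return copy_from_user((char *)priv + progress, iter_base, len);
}

A caller would then invoke, for example, iterate_and_advance(iter, want, buf, flatten_ustep, flatten_step) and get back the number of bytes actually gathered.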
+ * @len: The amount to iterate over. + * @priv: Data for the step functions. + * @ustep: Function for UBUF/IOVEC iterators; given __user addresses. + * @step: Function for other iterators; given kernel addresses. + * + * As iterate_and_advance2(), but priv2 is always NULL. + */ +static __always_inline +size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv, + iov_ustep_f ustep, iov_step_f step) +{ + return iterate_and_advance2(iter, len, priv, NULL, ustep, step); +} + +#endif /* _LINUX_IOV_ITER_H */ diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h index c006cf0a25f3..1c338f5e5b7a 100644 --- a/include/linux/iova_bitmap.h +++ b/include/linux/iova_bitmap.h @@ -7,6 +7,7 @@ #define _IOVA_BITMAP_H_ #include <linux/types.h> +#include <linux/errno.h> struct iova_bitmap; @@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap, unsigned long iova, size_t length, void *opaque); +#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, unsigned long page_size, u64 __user *data); @@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, iova_bitmap_fn_t fn); void iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length); +#else +static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, + size_t length, + unsigned long page_size, + u64 __user *data) +{ + return NULL; +} + +static inline void iova_bitmap_free(struct iova_bitmap *bitmap) +{ +} + +static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, + iova_bitmap_fn_t fn) +{ + return -EOPNOTSUPP; +} + +static inline void iova_bitmap_set(struct iova_bitmap *bitmap, + unsigned long iova, size_t length) +{ +} +#endif #endif diff --git a/include/linux/ipc.h b/include/linux/ipc.h index e1c9eea6015b..9b1434247aab 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -2,7 +2,7 @@ #ifndef _LINUX_IPC_H #define _LINUX_IPC_H -#include <linux/spinlock.h> +#include <linux/spinlock_types.h> #include <linux/uidgid.h> #include <linux/rhashtable-types.h> #include <uapi/linux/ipc.h> diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 5883551b1ee8..5e605e384aac 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -82,6 +82,7 @@ struct ipv6_devconf { __u32 ioam6_id_wide; __u8 ioam6_enabled; __u8 ndisc_evict_nocarrier; + __u8 ra_honor_pio_life; struct ctl_table_header *sysctl_header; }; @@ -147,6 +148,7 @@ struct inet6_skb_parm { #define IP6SKB_JUMBOGRAM 128 #define IP6SKB_SEG6 256 #define IP6SKB_FAKEJUMBO 512 +#define IP6SKB_MULTIPATH 1024 }; #if defined(CONFIG_NET_L3_MASTER_DEV) @@ -212,28 +214,9 @@ struct ipv6_pinfo { __be32 flow_label; __u32 frag_size; - /* - * Packed in 16bits. - * Omit one shift by putting the signed field at MSB. - */ -#if defined(__BIG_ENDIAN_BITFIELD) - __s16 hop_limit:9; - __u16 __unused_1:7; -#else - __u16 __unused_1:7; - __s16 hop_limit:9; -#endif + s16 hop_limit; + u8 mcast_hops; -#if defined(__BIG_ENDIAN_BITFIELD) - /* Packed in 16bits. 
*/ - __s16 mcast_hops:9; - __u16 __unused_2:6, - mc_loop:1; -#else - __u16 mc_loop:1, - __unused_2:6; - __s16 mcast_hops:9; -#endif int ucast_oif; int mcast_oif; @@ -261,21 +244,11 @@ struct ipv6_pinfo { } rxopt; /* sockopt flags */ - __u16 recverr:1, - sndflow:1, - repflow:1, - pmtudisc:3, - padding:1, /* 1 bit hole */ - srcprefs:3, /* 001: prefer temporary address + __u8 srcprefs; /* 001: prefer temporary address * 010: prefer public address * 100: prefer care-of address */ - dontfrag:1, - autoflowlabel:1, - autoflowlabel_set:1, - mc_all:1, - recverr_rfc4884:1, - rtalert_isolate:1; + __u8 pmtudisc; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; @@ -292,6 +265,18 @@ struct ipv6_pinfo { struct inet6_cork cork; }; +/* We currently use available bits from inet_sk(sk)->inet_flags, + * this could change in the future. + */ +#define inet6_test_bit(nr, sk) \ + test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet6_set_bit(nr, sk) \ + set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet6_clear_bit(nr, sk) \ + clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet6_assign_bit(nr, sk, val) \ + assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val) + /* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */ struct raw6_sock { /* inet_sock has to be the first member of raw6_sock */ diff --git a/include/linux/irq.h b/include/linux/irq.h index d8a6fdce9373..90081afa10ce 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -215,8 +215,6 @@ struct irq_data { * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set * IRQD_CAN_RESERVE - Can use reservation mode - * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change - * required * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked * from actual interrupt context. * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. 
Don't call @@ -247,11 +245,10 @@ enum { IRQD_SINGLE_TARGET = BIT(24), IRQD_DEFAULT_TRIGGER_SET = BIT(25), IRQD_CAN_RESERVE = BIT(26), - IRQD_MSI_NOMASK_QUIRK = BIT(27), - IRQD_HANDLE_ENFORCE_IRQCTX = BIT(28), - IRQD_AFFINITY_ON_ACTIVATE = BIT(29), - IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(30), - IRQD_RESEND_WHEN_IN_PROGRESS = BIT(31), + IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27), + IRQD_AFFINITY_ON_ACTIVATE = BIT(28), + IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29), + IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -426,21 +423,6 @@ static inline bool irqd_can_reserve(struct irq_data *d) return __irqd_to_state(d) & IRQD_CAN_RESERVE; } -static inline void irqd_set_msi_nomask_quirk(struct irq_data *d) -{ - __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK; -} - -static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d) -{ - __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK; -} - -static inline bool irqd_msi_nomask_quirk(struct irq_data *d) -{ - return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; -} - static inline void irqd_set_affinity_on_activate(struct irq_data *d) { __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 8cd11a223260..136f2980cba3 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -66,6 +66,9 @@ void irq_work_sync(struct irq_work *work); void irq_work_run(void); bool irq_work_needs_cpu(void); void irq_work_single(void *arg); + +void arch_irq_work_raise(void); + #else static inline bool irq_work_needs_cpu(void) { return false; } static inline void irq_work_run(void) { } diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 51c254b7fec2..ee0a82c60508 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -174,7 +174,7 @@ struct irq_domain { irq_hw_number_t hwirq_max; unsigned int revmap_size; struct radix_tree_root revmap_tree; - struct irq_data __rcu *revmap[]; + struct irq_data __rcu *revmap[] __counted_by(revmap_size); }; /* Irq domain flags */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 2b665c32f5fe..147feebd508c 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -12,6 +12,7 @@ #ifndef _LINUX_TRACE_IRQFLAGS_H #define _LINUX_TRACE_IRQFLAGS_H +#include <linux/irqflags_types.h> #include <linux/typecheck.h> #include <linux/cleanup.h> #include <asm/irqflags.h> @@ -34,19 +35,6 @@ #ifdef CONFIG_TRACE_IRQFLAGS -/* Per-task IRQ trace events information. */ -struct irqtrace_events { - unsigned int irq_events; - unsigned long hardirq_enable_ip; - unsigned long hardirq_disable_ip; - unsigned int hardirq_enable_event; - unsigned int hardirq_disable_event; - unsigned long softirq_disable_ip; - unsigned long softirq_enable_ip; - unsigned int softirq_disable_event; - unsigned int softirq_enable_event; -}; - DECLARE_PER_CPU(int, hardirqs_enabled); DECLARE_PER_CPU(int, hardirq_context); diff --git a/include/linux/irqflags_types.h b/include/linux/irqflags_types.h new file mode 100644 index 000000000000..c13f0d915097 --- /dev/null +++ b/include/linux/irqflags_types.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IRQFLAGS_TYPES_H +#define _LINUX_IRQFLAGS_TYPES_H + +#ifdef CONFIG_TRACE_IRQFLAGS + +/* Per-task IRQ trace events information. 
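Moving struct irqtrace_events into a dedicated irqflags_types.h follows the same header-untangling pattern as the ipc.h switch to spinlock_types.h above: code that merely embeds the type includes the lean header and avoids dragging in the full irqflags machinery. A sketch of the consumer side, assuming only the type is needed:

#include <linux/irqflags_types.h>	/* the struct only, no helpers */

struct my_debug_snapshot {
	struct irqtrace_events irqtrace;	/* embedded by value */
};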
*/ +struct irqtrace_events { + unsigned int irq_events; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; + unsigned int hardirq_disable_event; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; +}; + +#endif + +#endif /* _LINUX_IRQFLAGS_TYPES_H */ diff --git a/include/linux/ism.h b/include/linux/ism.h index 9a4c204df3da..5428edd90982 100644 --- a/include/linux/ism.h +++ b/include/linux/ism.h @@ -86,7 +86,6 @@ int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb, int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb); int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf, unsigned int offset, void *data, unsigned int size); -u8 *ism_get_seid(void); const struct smcd_ops *ism_get_smcd_ops(void); diff --git a/include/linux/iversion.h b/include/linux/iversion.h index f174ff1b59ee..8f972eaca2ed 100644 --- a/include/linux/iversion.h +++ b/include/linux/iversion.h @@ -256,7 +256,7 @@ inode_peek_iversion(const struct inode *inode) * For filesystems without any sort of change attribute, the best we can * do is fake one up from the ctime: */ -static inline u64 time_to_chattr(struct timespec64 *t) +static inline u64 time_to_chattr(const struct timespec64 *t) { u64 chattr = t->tv_sec; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 52772c826c86..971f3e826e15 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -756,11 +756,6 @@ struct journal_s unsigned long j_flags; /** - * @j_atomic_flags: Atomic journaling state flags. - */ - unsigned long j_atomic_flags; - - /** * @j_errno: * * Is there an outstanding uncleared error on the journal (from a prior @@ -886,7 +881,7 @@ struct journal_s * Journal head shrinker, reclaim buffer's journal head which * has been written back. */ - struct shrinker j_shrinker; + struct shrinker *j_shrinker; /** * @j_checkpoint_jh_count: @@ -999,6 +994,13 @@ struct journal_s struct block_device *j_fs_dev; /** + * @j_fs_dev_wb_err: + * + * Records the errseq of the client fs's backing block device. + */ + errseq_t j_fs_dev_wb_err; + + /** * @j_total_len: Total maximum capacity of the journal region on disk. */ unsigned int j_total_len; @@ -1374,6 +1376,9 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2) JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) +/* Journal high priority write IO operation flags */ +#define JBD2_JOURNAL_REQ_FLAGS (REQ_META | REQ_SYNC | REQ_IDLE) + /* * Journal flag definitions */ @@ -1397,12 +1402,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) JBD2_JOURNAL_FLUSH_ZEROOUT) /* - * Journal atomic flag definitions - */ -#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect io error while writing - * buffer back to disk */ - -/* * Function declarations for the journaling transaction and buffer * management */ @@ -1695,6 +1694,25 @@ static inline void jbd2_journal_abort_handle(handle_t *handle) handle->h_aborted = 1; } +static inline void jbd2_init_fs_dev_write_error(journal_t *journal) +{ + struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping; + + /* + * Save the original wb_err value of client fs's bdev mapping which + * could be used to detect the client fs's metadata async write error. 
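The errseq_t mechanism used here is a sample-and-check cursor: record the current position of the error sequence once, then later ask cheaply and locklessly whether any new writeback error has been raised since. A minimal sketch of the pattern with the generic errseq API (hypothetical caller, not jbd2 code; jbd2's init step uses errseq_check_and_advance() so that a preexisting error is also marked as seen):

errseq_t since = errseq_sample(&mapping->wb_err);	/* remember "now" */

/* ... asynchronous writeback happens elsewhere ... */

if (errseq_check(&mapping->wb_err, since))	/* nonzero: new error */
	pr_warn("writeback error since sample point\n");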
+ */ + errseq_check_and_advance(&mapping->wb_err, &journal->j_fs_dev_wb_err); +} + +static inline int jbd2_check_fs_dev_write_error(journal_t *journal) +{ + struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping; + + return errseq_check(&mapping->wb_err, + READ_ONCE(journal->j_fs_dev_wb_err)); +} + #endif /* __KERNEL__ */ /* Comparison functions for transaction IDs: perform comparisons using diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 819b6bc8ac08..dbb06d789e74 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -4,6 +4,7 @@ #include <linux/bug.h> #include <linux/kasan-enabled.h> +#include <linux/kasan-tags.h> #include <linux/kernel.h> #include <linux/static_key.h> #include <linux/types.h> @@ -54,11 +55,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; int kasan_populate_early_shadow(const void *shadow_start, const void *shadow_end); +#ifndef kasan_mem_to_shadow static inline void *kasan_mem_to_shadow(const void *addr) { return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET; } +#endif int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); @@ -127,20 +130,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab) __kasan_poison_slab(slab); } -void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object); -static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache, +void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object); +/** + * kasan_unpoison_new_object - Temporarily unpoison a new slab object. + * @cache: Cache the object belongs to. + * @object: Pointer to the object. + * + * This function is intended for the slab allocator's internal use. It + * temporarily unpoisons an object from a newly allocated slab without doing + * anything else. The object must later be repoisoned by + * kasan_poison_new_object(). + */ +static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache, void *object) { if (kasan_enabled()) - __kasan_unpoison_object_data(cache, object); + __kasan_unpoison_new_object(cache, object); } -void __kasan_poison_object_data(struct kmem_cache *cache, void *object); -static __always_inline void kasan_poison_object_data(struct kmem_cache *cache, +void __kasan_poison_new_object(struct kmem_cache *cache, void *object); +/** + * kasan_poison_new_object - Repoison a new slab object. + * @cache: Cache the object belongs to. + * @object: Pointer to the object. + * + * This function is intended for the slab allocator's internal use. It + * repoisons an object that was previously unpoisoned by + * kasan_unpoison_new_object() without doing anything else.
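Taken together, the two hooks bracket the short window in which a slab allocator legitimately touches an object that is still notionally free. A simplified sketch of the pairing from the allocator's side, assuming a cache with a constructor:

kasan_unpoison_new_object(cache, object);	/* lift the redzone */
if (cache->ctor)
	cache->ctor(object);			/* ctor may write the object */
kasan_poison_new_object(cache, object);		/* repoison until allocation */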
+ */ +static __always_inline void kasan_poison_new_object(struct kmem_cache *cache, void *object) { if (kasan_enabled()) - __kasan_poison_object_data(cache, object); + __kasan_poison_new_object(cache, object); } void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, @@ -170,13 +192,6 @@ static __always_inline void kasan_kfree_large(void *ptr) __kasan_kfree_large(ptr, _RET_IP_); } -void __kasan_slab_free_mempool(void *ptr, unsigned long ip); -static __always_inline void kasan_slab_free_mempool(void *ptr) -{ - if (kasan_enabled()) - __kasan_slab_free_mempool(ptr, _RET_IP_); -} - void * __must_check __kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags, bool init); static __always_inline void * __must_check kasan_slab_alloc( @@ -217,6 +232,113 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, return (void *)object; } +bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, + unsigned long ip); +/** + * kasan_mempool_poison_pages - Check and poison a mempool page allocation. + * @page: Pointer to the page allocation. + * @order: Order of the allocation. + * + * This function is intended for kernel subsystems that cache page allocations + * to reuse them instead of freeing them back to page_alloc (e.g. mempool). + * + * This function is similar to kasan_mempool_poison_object() but operates on + * page allocations. + * + * Before the poisoned allocation can be reused, it must be unpoisoned via + * kasan_mempool_unpoison_pages(). + * + * Return: true if the allocation can be safely reused; false otherwise. + */ +static __always_inline bool kasan_mempool_poison_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + return __kasan_mempool_poison_pages(page, order, _RET_IP_); + return true; +} + +void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order, + unsigned long ip); +/** + * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation. + * @page: Pointer to the page allocation. + * @order: Order of the allocation. + * + * This function is intended for kernel subsystems that cache page allocations + * to reuse them instead of freeing them back to page_alloc (e.g. mempool). + * + * This function unpoisons a page allocation that was previously poisoned by + * kasan_mempool_poison_pages() without zeroing the allocation's memory. For + * the tag-based modes, this function assigns a new tag to the allocation. + */ +static __always_inline void kasan_mempool_unpoison_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + __kasan_mempool_unpoison_pages(page, order, _RET_IP_); +} + +bool __kasan_mempool_poison_object(void *ptr, unsigned long ip); +/** + * kasan_mempool_poison_object - Check and poison a mempool slab allocation. + * @ptr: Pointer to the slab allocation. + * + * This function is intended for kernel subsystems that cache slab allocations + * to reuse them instead of freeing them back to the slab allocator (e.g. + * mempool). + * + * This function poisons a slab allocation and saves a free stack trace for it + * without initializing the allocation's memory and without putting it into the + * quarantine (for the Generic mode). + * + * This function also performs checks to detect double-free and invalid-free + * bugs and reports them. The caller can use the return value of this function + * to find out if the allocation is buggy. + * + * Before the poisoned allocation can be reused, it must be unpoisoned via + * kasan_mempool_unpoison_object(). 
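A sketch of the round trip from a caching subsystem's point of view, with hypothetical add_element()/remove_element() helpers; the return value of the poison call doubles as the double-free/invalid-free verdict, so a buggy element is simply not cached:

/* Element going into the cache for later reuse. */
if (kasan_mempool_poison_object(element))
	add_element(pool, element);		/* safe to stash */
/* else: a double-free or invalid-free was just reported; drop it. */

/* Element coming back out of the cache. */
element = remove_element(pool);
kasan_mempool_unpoison_object(element, element_size);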
+ * + * This function operates on all slab allocations including large kmalloc + * allocations (the ones returned by kmalloc_large() or by kmalloc() with the + * size > KMALLOC_MAX_SIZE). + * + * Return: true if the allocation can be safely reused; false otherwise. + */ +static __always_inline bool kasan_mempool_poison_object(void *ptr) +{ + if (kasan_enabled()) + return __kasan_mempool_poison_object(ptr, _RET_IP_); + return true; +} + +void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip); +/** + * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation. + * @ptr: Pointer to the slab allocation. + * @size: Size to be unpoisoned. + * + * This function is intended for kernel subsystems that cache slab allocations + * to reuse them instead of freeing them back to the slab allocator (e.g. + * mempool). + * + * This function unpoisons a slab allocation that was previously poisoned via + * kasan_mempool_poison_object() and saves an alloc stack trace for it without + * initializing the allocation's memory. For the tag-based modes, this function + * does not assign a new tag to the allocation and instead restores the + * original tags based on the pointer value. + * + * This function operates on all slab allocations including large kmalloc + * allocations (the ones returned by kmalloc_large() or by kmalloc() with the + * size > KMALLOC_MAX_SIZE). + */ +static __always_inline void kasan_mempool_unpoison_object(void *ptr, + size_t size) +{ + if (kasan_enabled()) + __kasan_mempool_unpoison_object(ptr, size, _RET_IP_); +} + /* * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for * the hardware tag-based mode that doesn't rely on compiler instrumentation. @@ -240,9 +362,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order, return false; } static inline void kasan_poison_slab(struct slab *slab) {} -static inline void kasan_unpoison_object_data(struct kmem_cache *cache, +static inline void kasan_unpoison_new_object(struct kmem_cache *cache, void *object) {} -static inline void kasan_poison_object_data(struct kmem_cache *cache, +static inline void kasan_poison_new_object(struct kmem_cache *cache, void *object) {} static inline void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object) @@ -254,7 +376,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init return false; } static inline void kasan_kfree_large(void *ptr) {} -static inline void kasan_slab_free_mempool(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags, bool init) { @@ -274,6 +395,17 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } +static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order) +{ + return true; +} +static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {} +static inline bool kasan_mempool_poison_object(void *ptr) +{ + return true; +} +static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {} + static inline bool kasan_check_byte(const void *address) { return true; @@ -283,8 +415,10 @@ static inline bool kasan_check_byte(const void *address) #if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) void kasan_unpoison_task_stack(struct task_struct *task); +asmlinkage void kasan_unpoison_task_stack_below(const void *watermark); #else static inline void kasan_unpoison_task_stack(struct task_struct *task) {} +static inline void 
kasan_unpoison_task_stack_below(const void *watermark) {} #endif #ifdef CONFIG_KASAN_GENERIC @@ -464,10 +598,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {} #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ -#ifdef CONFIG_KASAN_INLINE +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) void kasan_non_canonical_hook(unsigned long addr); -#else /* CONFIG_KASAN_INLINE */ +#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ static inline void kasan_non_canonical_hook(unsigned long addr) { } -#endif /* CONFIG_KASAN_INLINE */ +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ #endif /* LINUX_KASAN_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cee8fe87e9f4..d9ad21058eed 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -13,6 +13,7 @@ #include <linux/stdarg.h> #include <linux/align.h> +#include <linux/array_size.h> #include <linux/limits.h> #include <linux/linkage.h> #include <linux/stddef.h> @@ -50,12 +51,6 @@ #define READ 0 #define WRITE 1 -/** - * ARRAY_SIZE - get the number of elements in array @arr - * @arr: array to be sized - */ -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) - #define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL) #define u64_to_user_ptr(x) ( \ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 2a36f3218b51..99aaa050ccb7 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -316,6 +316,7 @@ struct kernfs_ops { struct poll_table_struct *pt); int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); + loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence); }; /* diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 32c78078552c..400cb6c02176 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -22,10 +22,6 @@ #include <uapi/linux/kexec.h> #include <linux/verification.h> -/* Location of a reserved region to hold the crash kernel. - */ -extern struct resource crashk_res; -extern struct resource crashk_low_res; extern note_buf_t __percpu *crash_notes; #ifdef CONFIG_KEXEC_CORE @@ -407,7 +403,7 @@ bool kexec_load_permitted(int kexec_image_type); /* List of defined/legal kexec file flags */ #define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ - KEXEC_FILE_NO_INITRAMFS) + KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG) /* flag to track if kexec reboot is in progress */ extern bool kexec_in_progress; @@ -504,6 +500,13 @@ static inline int crash_hotplug_memory_support(void) { return 0; } static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; } #endif +extern bool kexec_file_dbg_print; + +#define kexec_dprintk(fmt, ...) \ + printk("%s" fmt, \ + kexec_file_dbg_print ? 
KERN_INFO : KERN_DEBUG, \ + ##__VA_ARGS__) + #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; struct task_struct; diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 7d985a1dfe4a..5caf3ce82373 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -73,6 +73,7 @@ struct key_type { unsigned int flags; #define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */ +#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */ /* vet a description */ int (*vet_description)(const char *description); diff --git a/include/linux/key.h b/include/linux/key.h index 938d7ecfb495..943a432da3ae 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -515,6 +515,7 @@ extern void key_init(void); #define key_init() do { } while(0) #define key_free_user_ns(ns) do { } while(0) #define key_remove_domain(d) do { } while(0) +#define key_lookup(k) NULL #endif /* CONFIG_KEYS */ #endif /* __KERNEL__ */ diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 401af4757514..88100cc9caba 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -223,6 +223,8 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla #else /* CONFIG_KFENCE */ +#define kfence_sample_interval (0) + static inline bool is_kfence_address(const void *addr) { return false; } static inline void kfence_alloc_pool_and_metadata(void) { } static inline void kfence_init(void) { } diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h index 8bfa6c98176d..929287981afe 100644 --- a/include/linux/kmsan_types.h +++ b/include/linux/kmsan_types.h @@ -9,6 +9,8 @@ #ifndef _LINUX_KMSAN_TYPES_H #define _LINUX_KMSAN_TYPES_H +#include <linux/types.h> + /* These constants are defined in the MSan LLVM instrumentation pass. */ #define KMSAN_RETVAL_SIZE 800 #define KMSAN_PARAM_SIZE 800 diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 85a64cb95d75..0ff44d6633e3 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -26,8 +26,7 @@ #include <linux/rcupdate.h> #include <linux/mutex.h> #include <linux/ftrace.h> -#include <linux/refcount.h> -#include <linux/freelist.h> +#include <linux/objpool.h> #include <linux/rethook.h> #include <asm/kprobes.h> @@ -140,8 +139,8 @@ static inline bool kprobe_ftrace(struct kprobe *p) * */ struct kretprobe_holder { - struct kretprobe *rp; - refcount_t ref; + struct kretprobe __rcu *rp; + struct objpool_head pool; }; struct kretprobe { @@ -154,7 +153,6 @@ struct kretprobe { #ifdef CONFIG_KRETPROBE_ON_RETHOOK struct rethook *rh; #else - struct freelist_head freelist; struct kretprobe_holder *rph; #endif }; @@ -165,10 +163,7 @@ struct kretprobe_instance { #ifdef CONFIG_KRETPROBE_ON_RETHOOK struct rethook_node node; #else - union { - struct freelist_node freelist; - struct rcu_head rcu; - }; + struct rcu_head rcu; struct llist_node llist; struct kretprobe_holder *rph; kprobe_opcode_t *ret_addr; @@ -202,10 +197,8 @@ extern int arch_trampoline_kprobe(struct kprobe *p); #ifdef CONFIG_KRETPROBE_ON_RETHOOK static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri) { - RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), - "Kretprobe is accessed from instance under preemptive context"); - - return (struct kretprobe *)READ_ONCE(ri->node.rethook->data); + /* rethook::data is non-changed field, so that you can access it freely. 
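Since the rethook's data pointer is assigned once when the instance is set up and never rewritten, a return handler can fetch its kretprobe without RCU ceremony. A hypothetical handler using the accessor:

static int my_ret_handler(struct kretprobe_instance *ri,
			  struct pt_regs *regs)
{
	struct kretprobe *rp = get_kretprobe(ri);

	/* rp is stable for the lifetime of the instance. */
	pr_debug("return from %s\n", rp->kp.symbol_name);
	return 0;
}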
*/ + return (struct kretprobe *)ri->node.rethook->data; } static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri) { @@ -250,10 +243,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs, static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri) { - RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), - "Kretprobe is accessed from instance under preemptive context"); - - return READ_ONCE(ri->rph->rp); + return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held()); } static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri) @@ -450,6 +440,10 @@ int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, char *type, char *sym); + +int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); + #else /* !CONFIG_KPROBES: */ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) diff --git a/include/linux/ksm.h b/include/linux/ksm.h index c2dd786a30e1..401348e9f92b 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -76,8 +76,8 @@ static inline void ksm_exit(struct mm_struct *mm) * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, * but what if the vma was unmerged while the page was swapped out? */ -struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address); +struct folio *ksm_might_need_to_copy(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr); void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); @@ -129,10 +129,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, return 0; } -static inline struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address) +static inline struct folio *ksm_might_need_to_copy(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr) { - return page; + return folio; } static inline void rmap_walk_ksm(struct folio *folio, diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h index 529974e22ea7..7fcf29a4e0de 100644 --- a/include/linux/kstrtox.h +++ b/include/linux/kstrtox.h @@ -147,9 +147,4 @@ extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); -static inline int strtobool(const char *s, bool *res) -{ - return kstrtobool(s, res); -} - #endif /* _LINUX_KSTRTOX_H */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 2c30ade43bc8..b11f53c1ba2e 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -86,6 +86,7 @@ void free_kthread_struct(struct task_struct *k); void kthread_bind(struct task_struct *k, unsigned int cpu); void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); int kthread_stop(struct task_struct *k); +int kthread_stop_put(struct task_struct *k); bool kthread_should_stop(void); bool kthread_should_park(void); bool kthread_should_stop_or_park(void); diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 73f20deb497d..3a4e723eae0f 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -21,12 +21,10 @@ #ifndef _LINUX_KTIME_H #define _LINUX_KTIME_H -#include <linux/time.h> -#include <linux/jiffies.h> #include 
<asm/bug.h> - -/* Nanosecond scalar representation for kernel time values */ -typedef s64 ktime_t; +#include <linux/jiffies.h> +#include <linux/time.h> +#include <linux/types.h> /** * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9d3ac7720da9..7e7fd25b09b3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -80,8 +80,8 @@ /* Two fragments for cross MMIO pages. */ #define KVM_MAX_MMIO_FRAGMENTS 2 -#ifndef KVM_ADDRESS_SPACE_NUM -#define KVM_ADDRESS_SPACE_NUM 1 +#ifndef KVM_MAX_NR_ADDRESS_SPACES +#define KVM_MAX_NR_ADDRESS_SPACES 1 #endif /* @@ -190,8 +190,6 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except); -bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, - unsigned long *vcpu_bitmap); #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -255,12 +253,17 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER +union kvm_mmu_notifier_arg { + pte_t pte; + unsigned long attributes; +}; + struct kvm_gfn_range { struct kvm_memory_slot *slot; gfn_t start; gfn_t end; - pte_t pte; + union kvm_mmu_notifier_arg arg; bool may_block; }; bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); @@ -586,8 +589,20 @@ struct kvm_memory_slot { u32 flags; short id; u16 as_id; + +#ifdef CONFIG_KVM_PRIVATE_MEM + struct { + struct file __rcu *file; + pgoff_t pgoff; + } gmem; +#endif }; +static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot) +{ + return slot && (slot->flags & KVM_MEM_GUEST_MEMFD); +} + static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot) { return slot->flags & KVM_MEM_LOG_DIRTY_PAGES; @@ -662,7 +677,7 @@ struct kvm_irq_routing_table { * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. */ - struct hlist_head map[]; + struct hlist_head map[] __counted_by(nr_rt_entries); }; #endif @@ -675,13 +690,29 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm); #define KVM_MEM_SLOTS_NUM SHRT_MAX #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS) -#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE +#if KVM_MAX_NR_ADDRESS_SPACES == 1 +static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm) +{ + return KVM_MAX_NR_ADDRESS_SPACES; +} + static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) { return 0; } #endif +/* + * Arch code must define kvm_arch_has_private_mem if support for private memory + * is enabled. 
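The #if !defined() fallback that follows is the usual arch-override idiom: an architecture that supports private memory defines the predicate itself (as a macro, before this point), and everyone else inherits a stub returning false. A purely hypothetical arch-side definition, for illustration only:

/* In a hypothetical asm/kvm_host.h: */
#define kvm_arch_has_private_mem(kvm)	((kvm)->arch.has_private_mem)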
+ */ +#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) +static inline bool kvm_arch_has_private_mem(struct kvm *kvm) +{ + return false; +} +#endif + struct kvm_memslots { u64 generation; atomic_long_t last_used_slot; @@ -719,9 +750,9 @@ struct kvm { struct mm_struct *mm; /* userspace tied to this vm */ unsigned long nr_memslot_pages; /* The two memslot sets - active and inactive (per address space) */ - struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2]; + struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2]; /* The current active memslot set for each address space */ - struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; + struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES]; struct xarray vcpu_array; /* * Protected by slots_lock, but can be read outside if an @@ -751,7 +782,7 @@ struct kvm { struct list_head vm_list; struct mutex lock; struct kvm_io_bus __rcu *buses[KVM_NR_BUSES]; -#ifdef CONFIG_HAVE_KVM_EVENTFD +#ifdef CONFIG_HAVE_KVM_IRQCHIP struct { spinlock_t lock; struct list_head items; @@ -759,8 +790,8 @@ struct kvm { struct list_head resampler_list; struct mutex resampler_lock; } irqfds; - struct list_head ioeventfds; #endif + struct list_head ioeventfds; struct kvm_vm_stat stat; struct kvm_arch arch; refcount_t users_count; @@ -776,17 +807,16 @@ struct kvm { * Update side is protected by irq_lock. */ struct kvm_irq_routing_table __rcu *irq_routing; -#endif -#ifdef CONFIG_HAVE_KVM_IRQFD + struct hlist_head irq_ack_notifier_list; #endif -#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER struct mmu_notifier mmu_notifier; unsigned long mmu_invalidate_seq; long mmu_invalidate_in_progress; - unsigned long mmu_invalidate_range_start; - unsigned long mmu_invalidate_range_end; + gfn_t mmu_invalidate_range_start; + gfn_t mmu_invalidate_range_end; #endif struct list_head devices; u64 manual_dirty_log_protect; @@ -805,6 +835,10 @@ struct kvm { #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER struct notifier_block pm_notifier; #endif +#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES + /* Protected by slots_locks (for writes) and RCU (for reads) */ + struct xarray mem_attr_array; +#endif char stats_id[KVM_STATS_NAME_SIZE]; }; @@ -865,6 +899,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm) unlikely(__ret); \ }) +/* + * Note, "data corruption" refers to corruption of host kernel data structures, + * not guest data. Guest data corruption, suspected or confirmed, that is tied + * and contained to a single VM should *never* BUG() and potentially panic the + * host, i.e. use this variant of KVM_BUG() if and only if a KVM data structure + * is corrupted and that corruption can have a cascading effect to other parts + * of the hosts and/or to other VMs. 
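A hypothetical call site to make the division of labor concrete: corruption of a host-side KVM structure that could cascade uses this strong variant, while suspected guest-triggered oddities should stay with plain KVM_BUG_ON() and only take down the one VM:

/* Hypothetical invariant on a host-side structure. */
if (KVM_BUG_ON_DATA_CORRUPTION(slot->as_id >= KVM_MAX_NR_ADDRESS_SPACES, kvm))
	return -EIO;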
+ */ +#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \ +({ \ + bool __ret = !!(cond); \ + \ + if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \ + BUG_ON(__ret); \ + else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ + kvm_vm_bugged(kvm); \ + unlikely(__ret); \ +}) + static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PROVE_RCU @@ -944,7 +997,7 @@ static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) } #endif -#ifdef CONFIG_HAVE_KVM_IRQFD +#ifdef CONFIG_HAVE_KVM_IRQCHIP int kvm_irqfd_init(void); void kvm_irqfd_exit(void); #else @@ -968,7 +1021,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm); static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) { - as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM); + as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES); return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, lockdep_is_held(&kvm->slots_lock) || !refcount_read(&kvm->users_count)); @@ -1125,9 +1178,9 @@ enum kvm_mr_change { }; int kvm_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem); + const struct kvm_userspace_memory_region2 *mem); int __kvm_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem); + const struct kvm_userspace_memory_region2 *mem); void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, @@ -1359,6 +1412,9 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode); void kvm_flush_remote_tlbs(struct kvm *kvm); +void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); +void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot); #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); @@ -1368,10 +1424,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); #endif -void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, - unsigned long end); -void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, - unsigned long end); +void kvm_mmu_invalidate_begin(struct kvm *kvm); +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end); +void kvm_mmu_invalidate_end(struct kvm *kvm); +bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); @@ -1387,10 +1443,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, unsigned long mask); void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); -#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT -void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - const struct kvm_memory_slot *memslot); -#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ +#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty, struct kvm_memory_slot **memslot); @@ -1479,11 +1532,23 @@ static inline void kvm_arch_free_vm(struct kvm *kvm) } #endif -#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB -static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) +#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS +static inline int 
kvm_arch_flush_remote_tlbs(struct kvm *kvm) { return -ENOTSUPP; } +#else +int kvm_arch_flush_remote_tlbs(struct kvm *kvm); +#endif + +#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE +static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, + gfn_t gfn, u64 nr_pages) +{ + return -EOPNOTSUPP; +} +#else +int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages); #endif #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA @@ -1914,7 +1979,7 @@ extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; extern const struct kvm_stats_header kvm_vcpu_stats_header; extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; -#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) { if (unlikely(kvm->mmu_invalidate_in_progress)) @@ -1937,9 +2002,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) return 0; } -static inline int mmu_invalidate_retry_hva(struct kvm *kvm, +static inline int mmu_invalidate_retry_gfn(struct kvm *kvm, unsigned long mmu_seq, - unsigned long hva) + gfn_t gfn) { lockdep_assert_held(&kvm->mmu_lock); /* @@ -1948,10 +2013,20 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm, * that might be being invalidated. Note that it may include some false * positives, due to shortcuts when handing concurrent invalidations. */ - if (unlikely(kvm->mmu_invalidate_in_progress) && - hva >= kvm->mmu_invalidate_range_start && - hva < kvm->mmu_invalidate_range_end) - return 1; + if (unlikely(kvm->mmu_invalidate_in_progress)) { + /* + * Dropping mmu_lock after bumping mmu_invalidate_in_progress + * but before updating the range is a KVM bug. + */ + if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA || + kvm->mmu_invalidate_range_end == INVALID_GPA)) + return 1; + + if (gfn >= kvm->mmu_invalidate_range_start && + gfn < kvm->mmu_invalidate_range_end) + return 1; + } + if (kvm->mmu_invalidate_seq != mmu_seq) return 1; return 0; @@ -1980,12 +2055,10 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {} int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); -#ifdef CONFIG_HAVE_KVM_EVENTFD - void kvm_eventfd_init(struct kvm *kvm); int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); -#ifdef CONFIG_HAVE_KVM_IRQFD +#ifdef CONFIG_HAVE_KVM_IRQCHIP int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); void kvm_irqfd_release(struct kvm *kvm); bool kvm_notify_irqfd_resampler(struct kvm *kvm, @@ -2006,31 +2079,7 @@ static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm, { return false; } -#endif - -#else - -static inline void kvm_eventfd_init(struct kvm *kvm) {} - -static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) -{ - return -EINVAL; -} - -static inline void kvm_irqfd_release(struct kvm *kvm) {} - -#ifdef CONFIG_HAVE_KVM_IRQCHIP -static inline void kvm_irq_routing_update(struct kvm *kvm) -{ -} -#endif - -static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) -{ - return -ENOSYS; -} - -#endif /* CONFIG_HAVE_KVM_EVENTFD */ +#endif /* CONFIG_HAVE_KVM_IRQCHIP */ void kvm_arch_irq_routing_update(struct kvm *kvm); @@ -2148,8 +2197,6 @@ struct kvm_device_ops { int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma); }; -void kvm_device_get(struct kvm_device *dev); -void kvm_device_put(struct kvm_device *dev); struct kvm_device *kvm_device_from_filp(struct file *filp); int kvm_register_device_ops(const struct kvm_device_ops 
*ops, u32 type); void kvm_unregister_device_ops(u32 type); @@ -2287,4 +2334,57 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr) /* Max number of entries allowed for each kvm dirty ring */ #define KVM_DIRTY_RING_MAX_ENTRIES 65536 +static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, + gpa_t gpa, gpa_t size, + bool is_write, bool is_exec, + bool is_private) +{ + vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT; + vcpu->run->memory_fault.gpa = gpa; + vcpu->run->memory_fault.size = size; + + /* RWX flags are not (yet) defined or communicated to userspace. */ + vcpu->run->memory_fault.flags = 0; + if (is_private) + vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE; +} + +#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES +static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn) +{ + return xa_to_value(xa_load(&kvm->mem_attr_array, gfn)); +} + +bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, + unsigned long attrs); +bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, + struct kvm_gfn_range *range); +bool kvm_arch_post_set_memory_attributes(struct kvm *kvm, + struct kvm_gfn_range *range); + +static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) +{ + return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) && + kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE; +} +#else +static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) +{ + return false; +} +#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */ + +#ifdef CONFIG_KVM_PRIVATE_MEM +int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, + gfn_t gfn, kvm_pfn_t *pfn, int *max_order); +#else +static inline int kvm_gmem_get_pfn(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t *pfn, int *max_order) +{ + KVM_BUG_ON(1, kvm); + return -EIO; +} +#endif /* CONFIG_KVM_PRIVATE_MEM */ + #endif diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 6f4737d5046a..9d1f7835d8c1 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -6,6 +6,7 @@ struct kvm; struct kvm_async_pf; struct kvm_device_ops; +struct kvm_gfn_range; struct kvm_interrupt; struct kvm_irq_routing_table; struct kvm_memory_slot; diff --git a/include/linux/leds.h b/include/linux/leds.h index 7d428100b42b..4754b02d3a2c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -100,6 +100,7 @@ struct led_classdev { const char *name; unsigned int brightness; unsigned int max_brightness; + unsigned int color; int flags; /* Lower 16 bits reflect status */ @@ -313,6 +314,8 @@ extern struct led_classdev *of_led_get(struct device_node *np, int index); extern void led_put(struct led_classdev *led_cdev); struct led_classdev *__must_check devm_of_led_get(struct device *dev, int index); +struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev, + int index); /** * led_blink_set - set blinking with software fallback @@ -524,23 +527,6 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) return led_cdev->trigger_data; } -/** - * led_trigger_rename_static - rename a trigger - * @name: the new trigger name - * @trig: the LED trigger to rename - * - * Change a LED trigger name by copying the string passed in - * name into current trigger name, which MUST be large - * enough for the new string. - * - * Note that name must NOT point to the same string used - * during LED registration, as that could lead to races. 
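The new devm_of_led_get_optional() pairs with the existing devm_of_led_get(): assuming it follows the usual *_optional convention of returning NULL when the node simply has no LED at that index (rather than an error), a probe path can treat the LED as a nice-to-have:

struct led_classdev *led;

led = devm_of_led_get_optional(dev, 0);
if (IS_ERR(led))
	return PTR_ERR(led);		/* a real error: propagate */
if (led)
	led_set_brightness(led, LED_FULL);
/* NULL: no LED wired up; carry on without one. */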
- * - * This is meant to be used on triggers with statically - * allocated name. - */ -void led_trigger_rename_static(const char *name, struct led_trigger *trig); - #define module_led_trigger(__led_trigger) \ module_driver(__led_trigger, led_trigger_register, \ led_trigger_unregister) @@ -585,6 +571,9 @@ enum led_trigger_netdev_modes { TRIGGER_NETDEV_LINK_10, TRIGGER_NETDEV_LINK_100, TRIGGER_NETDEV_LINK_1000, + TRIGGER_NETDEV_LINK_2500, + TRIGGER_NETDEV_LINK_5000, + TRIGGER_NETDEV_LINK_10000, TRIGGER_NETDEV_HALF_DUPLEX, TRIGGER_NETDEV_FULL_DUPLEX, TRIGGER_NETDEV_TX, diff --git a/include/linux/libata.h b/include/linux/libata.h index 820f7a3a2749..26d68115afb8 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -192,6 +192,7 @@ enum { ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */ ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ + ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */ ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ @@ -222,6 +223,10 @@ enum { ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */ ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */ + ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */ + ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */ + ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */ + /* bits 24:31 of host->flags are reserved for LLD specific flags */ /* various lengths of time */ @@ -255,7 +260,7 @@ enum { * advised to wait only for the following duration before * doing SRST. */ - ATA_TMOUT_PMP_SRST_WAIT = 5000, + ATA_TMOUT_PMP_SRST_WAIT = 10000, /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might * be a spurious PHY event, so ignore the first PHY event that @@ -314,9 +319,10 @@ enum { ATA_EH_ENABLE_LINK = (1 << 3), ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */ + ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK | - ATA_EH_GET_SUCCESS_SENSE, + ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE, ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | ATA_EH_ENABLE_LINK, @@ -344,7 +350,6 @@ enum { ATA_LINK_RESUME_TRIES = 5, /* how hard are we gonna try to probe/recover devices */ - ATA_PROBE_MAX_TRIES = 3, ATA_EH_DEV_TRIES = 3, ATA_EH_PMP_TRIES = 5, ATA_EH_PMP_LINK_TRIES = 3, @@ -354,7 +359,7 @@ enum { /* This should match the actual table size of * ata_eh_cmd_timeout_table in libata-eh.c. */ - ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7, + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, /* Horkage types. 
May be set by libata or controller on drives (some horkage may be drive/controller pair dependent */ @@ -466,7 +471,7 @@ enum ata_completion_errors { /* * Link power management policy: If you alter this, you also need to - * alter libata-scsi.c (for the ascii descriptions) + * alter libata-sata.c (for the ascii descriptions) */ enum ata_lpm_policy { ATA_LPM_UNKNOWN, @@ -651,7 +656,7 @@ struct ata_cpr { struct ata_cpr_log { u8 nr_cpr; - struct ata_cpr cpr[]; + struct ata_cpr cpr[] __counted_by(nr_cpr); }; struct ata_device { @@ -977,12 +982,6 @@ struct ata_port_operations { ssize_t size); /* - * Obsolete - */ - void (*phy_reset)(struct ata_port *ap); - void (*eng_timeout)(struct ata_port *ap); - - /* * ->inherits must be the last field and all the preceding * fields must be pointers. */ @@ -1116,7 +1115,7 @@ static inline void ata_sas_port_resume(struct ata_port *ap) extern int ata_ratelimit(void); extern void ata_msleep(struct ata_port *ap, unsigned int msecs); extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, - u32 val, unsigned long interval, unsigned long timeout); + u32 val, unsigned int interval, unsigned int timeout); extern int atapi_cmd_type(u8 opcode); extern unsigned int ata_pack_xfermask(unsigned int pio_mask, unsigned int mwdma_mask, @@ -1151,6 +1150,7 @@ extern int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]); extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); +extern int ata_scsi_slave_alloc(struct scsi_device *sdev); extern int ata_scsi_slave_config(struct scsi_device *sdev); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, @@ -1166,11 +1166,11 @@ extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port * * SATA specific code - drivers/ata/libata-sata.c */ #ifdef CONFIG_SATA_HOST -extern const unsigned long sata_deb_timing_normal[]; -extern const unsigned long sata_deb_timing_hotplug[]; -extern const unsigned long sata_deb_timing_long[]; +extern const unsigned int sata_deb_timing_normal[]; +extern const unsigned int sata_deb_timing_hotplug[]; +extern const unsigned int sata_deb_timing_long[]; -static inline const unsigned long * +static inline const unsigned int * sata_ehc_deb_timing(struct ata_eh_context *ehc) { if (ehc->i.flags & ATA_EHI_HOTPLUGGED) @@ -1185,14 +1185,14 @@ extern int sata_scr_write(struct ata_link *link, int reg, u32 val); extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); extern int sata_set_spd(struct ata_link *link); extern int sata_link_hardreset(struct ata_link *link, - const unsigned long *timing, unsigned long deadline, + const unsigned int *timing, unsigned long deadline, bool *online, int (*check_ready)(struct ata_link *)); -extern int sata_link_resume(struct ata_link *link, const unsigned long *params, +extern int sata_link_resume(struct ata_link *link, const unsigned int *params, unsigned long deadline); extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link); extern void ata_eh_analyze_ncq_error(struct ata_link *link); #else -static inline const unsigned long * +static inline const unsigned int * sata_ehc_deb_timing(struct ata_eh_context *ehc) { return NULL; @@ -1212,7 +1212,7 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) } static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; } static inline int sata_link_hardreset(struct ata_link *link, - const 
unsigned long *timing, + const unsigned int *timing, unsigned long deadline, bool *online, int (*check_ready)(struct ata_link *)) @@ -1222,7 +1222,7 @@ static inline int sata_link_hardreset(struct ata_link *link, return -EOPNOTSUPP; } static inline int sata_link_resume(struct ata_link *link, - const unsigned long *params, + const unsigned int *params, unsigned long deadline) { return -EOPNOTSUPP; @@ -1234,20 +1234,15 @@ static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link) static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { } #endif extern int sata_link_debounce(struct ata_link *link, - const unsigned long *params, unsigned long deadline); + const unsigned int *params, unsigned long deadline); extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, bool spm_wakeup); extern int ata_slave_link_init(struct ata_port *ap); -extern void ata_sas_port_destroy(struct ata_port *); extern struct ata_port *ata_sas_port_alloc(struct ata_host *, struct ata_port_info *, struct Scsi_Host *); -extern void ata_sas_async_probe(struct ata_port *ap); -extern int ata_sas_sync_probe(struct ata_port *ap); -extern int ata_sas_port_init(struct ata_port *); -extern int ata_sas_port_start(struct ata_port *ap); +extern void ata_port_probe(struct ata_port *ap); extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); extern void ata_sas_tport_delete(struct ata_port *ap); -extern void ata_sas_port_stop(struct ata_port *ap); extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); extern void ata_tf_to_fis(const struct ata_taskfile *tf, @@ -1404,6 +1399,7 @@ extern const struct attribute_group *ata_common_sdev_groups[]; .this_id = ATA_SHT_THIS_ID, \ .emulated = ATA_SHT_EMULATED, \ .proc_name = drv_name, \ + .slave_alloc = ata_scsi_slave_alloc, \ .slave_destroy = ata_scsi_slave_destroy, \ .bios_param = ata_std_bios_param, \ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\ @@ -1565,6 +1561,11 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...); extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, const char *name); #endif +static inline void ata_port_desc_misc(struct ata_port *ap, int irq) +{ + ata_port_desc(ap, "irq %d", irq); + ata_port_desc(ap, "lpm-pol %d", ap->target_lpm_policy); +} static inline bool ata_tag_internal(unsigned int tag) { @@ -1785,7 +1786,7 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap, { struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - if (unlikely(!qc) || !ap->ops->error_handler) + if (unlikely(!qc)) return qc; if ((qc->flags & (ATA_QCFLAG_ACTIVE | @@ -1876,7 +1877,7 @@ static inline int ata_check_ready(u8 status) } static inline unsigned long ata_deadline(unsigned long from_jiffies, - unsigned long timeout_msecs) + unsigned int timeout_msecs) { return from_jiffies + msecs_to_jiffies(timeout_msecs); } @@ -1885,23 +1886,21 @@ static inline unsigned long ata_deadline(unsigned long from_jiffies, change in future hardware and specs, secondly 0xFF means 'no DMA' but is > UDMA_0. 
Here be dragons */ -static inline int ata_using_mwdma(struct ata_device *adev) +static inline bool ata_using_mwdma(struct ata_device *adev) { - if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4) - return 1; - return 0; + return adev->dma_mode >= XFER_MW_DMA_0 && + adev->dma_mode <= XFER_MW_DMA_4; } -static inline int ata_using_udma(struct ata_device *adev) +static inline bool ata_using_udma(struct ata_device *adev) { - if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7) - return 1; - return 0; + return adev->dma_mode >= XFER_UDMA_0 && + adev->dma_mode <= XFER_UDMA_7; } -static inline int ata_dma_enabled(struct ata_device *adev) +static inline bool ata_dma_enabled(struct ata_device *adev) { - return (adev->dma_mode == 0xFF ? 0 : 1); + return adev->dma_mode != 0xFF; } /************************************************************************** diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index 15e0e0209da4..287f590ed56b 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -10,6 +10,11 @@ static inline void linkmode_zero(unsigned long *dst) bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); } +static inline void linkmode_fill(unsigned long *dst) +{ + bitmap_fill(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) { bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -43,15 +48,6 @@ static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) __set_bit(nr, addr); } -static inline void linkmode_set_bit_array(const int *array, int array_size, - unsigned long *addr) -{ - int i; - - for (i = 0; i < array_size; i++) - linkmode_set_bit(array[i], addr); -} - static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) { __clear_bit(nr, addr); @@ -71,6 +67,15 @@ static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr) return test_bit(nr, addr); } +static inline void linkmode_set_bit_array(const int *array, int array_size, + unsigned long *addr) +{ + int i; + + for (i = 0; i < array_size; i++) + linkmode_set_bit(array[i], addr); +} + static inline int linkmode_equal(const unsigned long *src1, const unsigned long *src2) { diff --git a/include/linux/list.h b/include/linux/list.h index 164b4d0e9d2a..059aa1fff41e 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -687,6 +687,14 @@ static inline void list_splice_tail_init(struct list_head *list, for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next) /** + * list_for_each_reverse - iterate backwards over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each_reverse(pos, head) \ + for (pos = (head)->prev; pos != (head); pos = pos->prev) + +/** * list_for_each_rcu - Iterate over a list in an RCU-safe fashion * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. @@ -1111,6 +1119,26 @@ static inline void hlist_move_list(struct hlist_head *old, old->first = NULL; } +/** + * hlist_splice_init() - move all entries from one list to another + * @from: hlist_head from which entries will be moved + * @last: last entry on the @from list + * @to: hlist_head to which entries will be moved + * + * @to can be empty, @from must contain at least @last.
+ */ +static inline void hlist_splice_init(struct hlist_head *from, + struct hlist_node *last, + struct hlist_head *to) +{ + if (to->first) + to->first->pprev = &last->next; + last->next = to->first; + to->first = from->first; + from->first->pprev = &to->first; + from->first = NULL; +} + #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index b35968ee9fb5..7675a48a0701 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -73,8 +73,10 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren /** * list_lru_add: add an element to the lru list's tail - * @list_lru: the lru pointer + * @lru: the lru pointer * @item: the item to be added. + * @nid: the node id of the sublist to add the item to. + * @memcg: the cgroup of the sublist to add the item to. * * If the element is already part of a list, this function returns doing * nothing. Therefore the caller does not need to keep state about whether or @@ -83,24 +85,54 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren * the caller organize itself in a way that elements can be in more than * one type of list, it is up to the caller to fully remove the item from * the previous list (with list_lru_del() for instance) before moving it - * to @list_lru + * to @lru. + * + * Return: true if the list was updated, false otherwise + */ +bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid, + struct mem_cgroup *memcg); + +/** + * list_lru_add_obj: add an element to the lru list's tail + * @lru: the lru pointer + * @item: the item to be added. + * + * This function is similar to list_lru_add(), but the NUMA node and the + * memcg of the sublist is determined by @item list_head. This assumption is + * valid for slab objects LRU such as dentries, inodes, etc. * * Return value: true if the list was updated, false otherwise */ -bool list_lru_add(struct list_lru *lru, struct list_head *item); +bool list_lru_add_obj(struct list_lru *lru, struct list_head *item); /** - * list_lru_del: delete an element to the lru list - * @list_lru: the lru pointer + * list_lru_del: delete an element from the lru list + * @lru: the lru pointer * @item: the item to be deleted. + * @nid: the node id of the sublist to delete the item from. + * @memcg: the cgroup of the sublist to delete the item from. * - * This function works analogously as list_lru_add in terms of list + * This function works analogously as list_lru_add() in terms of list * manipulation. The comments about an element already pertaining to - * a list are also valid for list_lru_del. + * a list are also valid for list_lru_del(). * - * Return value: true if the list was updated, false otherwise + * Return: true if the list was updated, false otherwise */ -bool list_lru_del(struct list_lru *lru, struct list_head *item); +bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid, + struct mem_cgroup *memcg); + +/** + * list_lru_del_obj: delete an element from the lru list + * @lru: the lru pointer + * @item: the item to be deleted. + * + * This function is similar to list_lru_del(), but the NUMA node and the + * memcg of the sublist is determined by @item list_head. This assumption is + * valid for slab objects LRU such as dentries, inodes, etc. + * + * Return value: true if the list was updated, false otherwise. 
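
hlist_splice_init() asks the caller for the last entry on @from, which a non-empty singly-linked hlist has to find by walking. A small illustrative helper (assumed names) for the common drain-to-another-list case:

```c
/* Illustrative only: move every entry from @pending onto @done. */
static void example_drain(struct hlist_head *pending, struct hlist_head *done)
{
	struct hlist_node *last = pending->first;

	if (!last)
		return;
	while (last->next)	/* locate the tail, as the API requires */
		last = last->next;
	hlist_splice_init(pending, last, done);
}
```
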
+ */ +bool list_lru_del_obj(struct list_lru *lru, struct list_head *item); /** * list_lru_count_one: return the number of objects currently held by @lru @@ -108,9 +140,11 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item); * @nid: the node id to count from. * @memcg: the cgroup to count from. * - * Always return a non-negative number, 0 for empty lists. There is no - * guarantee that the list is not updated while the count is being computed. - * Callers that want such a guarantee need to provide an outer lock. + * There is no guarantee that the list is not updated while the count is being + * computed. Callers that want such a guarantee need to provide an outer lock. + * + * Return: 0 for empty lists, otherwise the number of objects + * currently held by @lru. */ unsigned long list_lru_count_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg); @@ -136,12 +170,28 @@ static inline unsigned long list_lru_count(struct list_lru *lru) void list_lru_isolate(struct list_lru_one *list, struct list_head *item); void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, struct list_head *head); +/** + * list_lru_putback: undo list_lru_isolate + * @lru: the lru pointer. + * @item: the item to put back. + * @nid: the node id of the sublist to put the item back to. + * @memcg: the cgroup of the sublist to put the item back to. + * + * Put back an isolated item into its original LRU. Note that unlike + * list_lru_add, this does not increment the node LRU count (as + * list_lru_isolate does not originally decrement this count). + * + * Since we might have dropped the LRU lock in between, recompute list_lru_one + * from the node's id and memcg. + */ +void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid, + struct mem_cgroup *memcg); typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, struct list_lru_one *list, spinlock_t *lock, void *cb_arg); /** - * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items. + * list_lru_walk_one: walk a @lru, isolating and disposing freeable items. * @lru: the lru pointer. * @nid: the node id to scan from. * @memcg: the cgroup to scan from. @@ -150,24 +200,24 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, * @cb_arg: opaque type that will be passed to @isolate * @nr_to_walk: how many items to scan. * - * This function will scan all elements in a particular list_lru, calling the + * This function will scan all elements in a particular @lru, calling the * @isolate callback for each of those items, along with the current list * spinlock and a caller-provided opaque. The @isolate callback can choose to * drop the lock internally, but *must* return with the lock held. The callback - * will return an enum lru_status telling the list_lru infrastructure what to + * will return an enum lru_status telling the @lru infrastructure what to * do with the object being scanned. * - * Please note that nr_to_walk does not mean how many objects will be freed, + * Please note that @nr_to_walk does not mean how many objects will be freed, * just how many objects will be scanned. * - * Return value: the number of objects effectively removed from the LRU. + * Return: the number of objects effectively removed from the LRU. */ unsigned long list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk); /** - * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items. 
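
To make the reworked list_lru calling conventions concrete: a sketch contrasting the explicit-key list_lru_del() with the _obj variant that derives the NUMA node and memcg from the item itself. The object type and callers are hypothetical.

```c
struct example_obj {
	struct list_head lru_link;
	/* ... payload ... */
};

/* Slab-object style: the keys are inferred from the item's own memory. */
static void example_track(struct list_lru *lru, struct example_obj *obj)
{
	list_lru_add_obj(lru, &obj->lru_link);
}

/* Explicit style: the caller already knows which sublist the item is on. */
static void example_untrack(struct list_lru *lru, struct example_obj *obj,
			    int nid, struct mem_cgroup *memcg)
{
	list_lru_del(lru, &obj->lru_link, nid, memcg);
}
```
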
+ * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items. * @lru: the lru pointer. * @nid: the node id to scan from. * @memcg: the cgroup to scan from. @@ -176,7 +226,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru, * @cb_arg: opaque type that will be passed to @isolate * @nr_to_walk: how many items to scan. * - * Same as @list_lru_walk_one except that the spinlock is acquired with + * Same as list_lru_walk_one() except that the spinlock is acquired with * spin_lock_irq(). */ unsigned long list_lru_walk_one_irq(struct list_lru *lru, diff --git a/include/linux/llist.h b/include/linux/llist.h index 85bda2d02d65..2c982ff7475a 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -74,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list) } /** + * init_llist_node - initialize lock-less list node + * @node: the node to be initialised + * + * In cases where there is a need to test if a node is on + * a list or not, this initialises the node to clearly + * not be on any list. + */ +static inline void init_llist_node(struct llist_node *node) +{ + node->next = node; +} + +/** + * llist_on_list - test if a lock-less list node is on a list + * @node: the node to test + * + * When a node is on a list the ->next pointer will be NULL or + * some other node. It can never point to itself. We use that + * in init_llist_node() to record that a node is not on any list, + * and here to test whether it is on any list. + */ +static inline bool llist_on_list(const struct llist_node *node) +{ + return node->next != node; +} + +/** * llist_entry - get the struct of this entry * @ptr: the &struct llist_node pointer. * @type: the type of the struct this is embedded in. @@ -249,6 +276,25 @@ static inline struct llist_node *__llist_del_all(struct llist_head *head) extern struct llist_node *llist_del_first(struct llist_head *head); +/** + * llist_del_first_init - delete the first entry from a lock-less list and mark it as being off-list + * @head: the head of lock-less list to delete from. + * + * This behaves the same as llist_del_first() except that init_llist_node() is called + * on the returned node so that llist_on_list() will report false for the node.
+ */ +static inline struct llist_node *llist_del_first_init(struct llist_head *head) +{ + struct llist_node *n = llist_del_first(head); + + if (n) + init_llist_node(n); + return n; +} + +extern bool llist_del_first_this(struct llist_head *head, + struct llist_node *this); + struct llist_node *llist_reverse_order(struct llist_node *head); #endif /* LLIST_H */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 0f016d69c996..9f565416d186 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -282,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, struct nlm_lock *, struct nlm_cookie *); __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); -void nlmsvc_retry_blocked(void); +void nlmsvc_retry_blocked(struct svc_rqst *rqstp); void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, nlm_host_match_fn_t match); void nlmsvc_grant_reply(struct nlm_cookie *, __be32); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index dc2844b071c2..08b0d1d9d78b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -82,63 +82,6 @@ struct lock_chain { u64 chain_key; }; -#define MAX_LOCKDEP_KEYS_BITS 13 -#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) -#define INITIAL_CHAIN_KEY -1 - -struct held_lock { - /* - * One-way hash of the dependency chain up to this point. We - * hash the hashes step by step as the dependency chain grows. - * - * We use it for dependency-caching and we skip detection - * passes and dependency-updates if there is a cache-hit, so - * it is absolutely critical for 100% coverage of the validator - * to have a unique key value for every unique dependency path - * that can occur in the system, to make a unique hash value - * as likely as possible - hence the 64-bit width. - * - * The task struct holds the current hash value (initialized - * with zero), here we store the previous hash value: - */ - u64 prev_chain_key; - unsigned long acquire_ip; - struct lockdep_map *instance; - struct lockdep_map *nest_lock; -#ifdef CONFIG_LOCK_STAT - u64 waittime_stamp; - u64 holdtime_stamp; -#endif - /* - * class_idx is zero-indexed; it points to the element in - * lock_classes this held lock instance belongs to. class_idx is in - * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. - */ - unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; - /* - * The lock-stack is unified in that the lock chains of interrupt - * contexts nest ontop of process context chains, but we 'separate' - * the hashes by starting with 0 if we cross into an interrupt - * context, and we also keep do not add cross-context lock - * dependencies - the lock usage graph walking covers that area - * anyway, and we'd just unnecessarily increase the number of - * dependencies otherwise. [Note: hardirq and softirq contexts - * are separated from each other too.] 
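
The on-list marking added to llist above enables an enqueue-once pattern; a sketch under the usual constraint that only one consumer uses the del_first variants, with all names illustrative. Nodes must have been initialised with init_llist_node() before first use, and a real user would need its own serialisation against double-submit races.

```c
struct example_item {
	struct llist_node lnode;	/* init_llist_node() at creation */
	int data;
};

/* Producer: enqueue only if the item is not already queued. */
static void example_submit(struct llist_head *q, struct example_item *item)
{
	if (!llist_on_list(&item->lnode))
		llist_add(&item->lnode, q);
}

/* Single consumer: the returned node is re-marked as off-list. */
static struct example_item *example_pop(struct llist_head *q)
{
	struct llist_node *n = llist_del_first_init(q);

	return n ? container_of(n, struct example_item, lnode) : NULL;
}
```
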
- * - * The following field is used to detect when we cross into an - * interrupt context: - */ - unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ - unsigned int trylock:1; /* 16 bits */ - - unsigned int read:2; /* see lock_acquire() comment */ - unsigned int check:1; /* see lock_acquire() comment */ - unsigned int hardirqs_off:1; - unsigned int sync:1; - unsigned int references:11; /* 32 bits */ - unsigned int pin_count; -}; - /* * Initialization, self-test and debugging-output methods: */ diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 2ebc323d345a..70d30d40ea4a 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -127,12 +127,12 @@ struct lock_class { unsigned long usage_mask; const struct lock_trace *usage_traces[LOCK_TRACE_STATES]; + const char *name; /* * Generation counter, when doing certain classes of graph walking, * to ensure that we check one node only once: */ int name_version; - const char *name; u8 wait_type_inner; u8 wait_type_outer; @@ -198,6 +198,63 @@ struct lockdep_map { struct pin_cookie { unsigned int val; }; +#define MAX_LOCKDEP_KEYS_BITS 13 +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) +#define INITIAL_CHAIN_KEY -1 + +struct held_lock { + /* + * One-way hash of the dependency chain up to this point. We + * hash the hashes step by step as the dependency chain grows. + * + * We use it for dependency-caching and we skip detection + * passes and dependency-updates if there is a cache-hit, so + * it is absolutely critical for 100% coverage of the validator + * to have a unique key value for every unique dependency path + * that can occur in the system, to make a unique hash value + * as likely as possible - hence the 64-bit width. + * + * The task struct holds the current hash value (initialized + * with zero), here we store the previous hash value: + */ + u64 prev_chain_key; + unsigned long acquire_ip; + struct lockdep_map *instance; + struct lockdep_map *nest_lock; +#ifdef CONFIG_LOCK_STAT + u64 waittime_stamp; + u64 holdtime_stamp; +#endif + /* + * class_idx is zero-indexed; it points to the element in + * lock_classes this held lock instance belongs to. class_idx is in + * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. + */ + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; + /* + * The lock-stack is unified in that the lock chains of interrupt + * contexts nest ontop of process context chains, but we 'separate' + * the hashes by starting with 0 if we cross into an interrupt + * context, and we also keep do not add cross-context lock + * dependencies - the lock usage graph walking covers that area + * anyway, and we'd just unnecessarily increase the number of + * dependencies otherwise. [Note: hardirq and softirq contexts + * are separated from each other too.] 
+ * + * The following field is used to detect when we cross into an + * interrupt context: + */ + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; /* 16 bits */ + + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:1; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; + unsigned int sync:1; + unsigned int references:11; /* 32 bits */ + unsigned int pin_count; +}; + #else /* !CONFIG_LOCKDEP */ /* diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h index 54945aa824b4..babf4e3c28ba 100644 --- a/include/linux/logic_pio.h +++ b/include/linux/logic_pio.h @@ -39,9 +39,6 @@ struct logic_pio_host_ops { #ifdef CONFIG_INDIRECT_PIO u8 logic_inb(unsigned long addr); -void logic_outb(u8 value, unsigned long addr); -void logic_outw(u16 value, unsigned long addr); -void logic_outl(u32 value, unsigned long addr); u16 logic_inw(unsigned long addr); u32 logic_inl(unsigned long addr); void logic_outb(u8 value, unsigned long addr); diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index ac962c4cb44b..76458b6d53da 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -43,17 +43,17 @@ LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old, const kernel_cap_t *permitted) LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts) -LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb) +LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb) LSM_HOOK(int, 0, quota_on, struct dentry *dentry) LSM_HOOK(int, 0, syslog, int type) LSM_HOOK(int, 0, settime, const struct timespec64 *ts, const struct timezone *tz) -LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages) +LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages) LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm) -LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file) +LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file) LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) -LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) -LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) +LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm) +LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm) LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference) LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, struct fs_context *src_sc) @@ -66,7 +66,7 @@ LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts) LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts) -LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb) +LSM_HOOK(int, 0, sb_kern_mount, const struct super_block *sb) LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb) LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry) LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path, @@ -171,6 +171,8 @@ LSM_HOOK(int, 0, file_alloc_security, struct file *file) LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file) LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd, unsigned long 
arg) +LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd, + unsigned long arg) LSM_HOOK(int, 0, mmap_addr, unsigned long addr) LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) @@ -262,6 +264,10 @@ LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops, LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb) LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry, struct inode *inode) +LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr, + struct lsm_ctx __user *ctx, size_t *size, u32 flags) +LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr, + struct lsm_ctx *ctx, size_t size, u32 flags) LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name, char **value) LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) @@ -273,7 +279,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen) LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode) LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen) LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) -LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx, +LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx, u32 *ctxlen) #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) @@ -309,9 +315,9 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how) LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb) -LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock, +LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) -LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock, +LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock, struct sk_buff *skb, u32 *secid) LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority) LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index dcb5e5b5eb13..a2ade0ffe9e7 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -25,6 +25,7 @@ #ifndef __LINUX_LSM_HOOKS_H #define __LINUX_LSM_HOOKS_H +#include <uapi/linux/lsm.h> #include <linux/security.h> #include <linux/init.h> #include <linux/rculist.h> @@ -42,6 +43,18 @@ struct security_hook_heads { #undef LSM_HOOK } __randomize_layout; +/** + * struct lsm_id - Identify a Linux Security Module. + * @name: name of the LSM, must be approved by the LSM maintainers + * @id: LSM ID number from uapi/linux/lsm.h + * + * Contains the information that identifies the LSM. + */ +struct lsm_id { + const char *name; + u64 id; +}; + /* * Security module hook list structure. * For use with generic list macros for common operations.
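
Given struct lsm_id above, and the security_add_hooks() signature change in the next hunk, registration would plausibly look like the following. The module name, hook body, and ID are placeholders; a real LSM uses its assigned LSM_ID_* constant from uapi/linux/lsm.h.

```c
static int example_file_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return 0;	/* placeholder hook body */
}

static const struct lsm_id example_lsmid = {
	.name = "example",	/* hypothetical LSM name */
	.id = LSM_ID_UNDEF,	/* placeholder; use the assigned ID */
};

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_ioctl, example_file_ioctl),
};

static int __init example_lsm_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
			   &example_lsmid);
	return 0;
}
```
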
@@ -50,7 +63,7 @@ struct security_hook_list { struct hlist_node list; struct hlist_head *head; union security_list_options hook; - const char *lsm; + const struct lsm_id *lsmid; } __randomize_layout; /* @@ -104,7 +117,7 @@ extern struct security_hook_heads security_hook_heads; extern char *lsm_names; extern void security_add_hooks(struct security_hook_list *hooks, int count, - const char *lsm); + const struct lsm_id *lsmid); #define LSM_FLAG_LEGACY_MAJOR BIT(0) #define LSM_FLAG_EXCLUSIVE BIT(1) diff --git a/include/linux/lwq.h b/include/linux/lwq.h new file mode 100644 index 000000000000..d081d5cf8e33 --- /dev/null +++ b/include/linux/lwq.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef LWQ_H +#define LWQ_H +/* + * Light-weight single-linked queue built from llist + * + * Entries can be enqueued from any context with no locking. + * Entries can be dequeued from process context with integrated locking. + * + * This is particularly suitable when work items are queued in + * BH or IRQ context, and where work items are handled one at a time + * by dedicated threads. + */ +#include <linux/container_of.h> +#include <linux/spinlock.h> +#include <linux/llist.h> + +struct lwq_node { + struct llist_node node; +}; + +struct lwq { + spinlock_t lock; + struct llist_node *ready; /* entries to be dequeued */ + struct llist_head new; /* entries being enqueued */ +}; + +/** + * lwq_init - initialise a lwq + * @q: the lwq object + */ +static inline void lwq_init(struct lwq *q) +{ + spin_lock_init(&q->lock); + q->ready = NULL; + init_llist_head(&q->new); +} + +/** + * lwq_empty - test if lwq contains any entry + * @q: the lwq object + * + * This empty test contains an acquire barrier so that if a wakeup + * is sent when lwq_dequeue returns true, it is safe to go to sleep after + * a test on lwq_empty(). + */ +static inline bool lwq_empty(struct lwq *q) +{ + /* acquire ensures ordering wrt lwq_enqueue() */ + return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new); } + +struct llist_node *__lwq_dequeue(struct lwq *q); +/** + * lwq_dequeue - dequeue first (oldest) entry from lwq + * @q: the queue to dequeue from + * @type: the type of object to return + * @member: the member in the returned object which is an lwq_node. + * + * Remove a single object from the lwq and return it. This will take + * a spinlock and so must always be called in the same context, typically + * process context. + */ +#define lwq_dequeue(q, type, member) \ + ({ struct llist_node *_n = __lwq_dequeue(q); \ + _n ? container_of(_n, type, member.node) : NULL; }) + +struct llist_node *lwq_dequeue_all(struct lwq *q); + +/** + * lwq_for_each_safe - iterate over detached queue allowing deletion + * @_n: iterator variable + * @_t1: temporary struct llist_node ** + * @_t2: temporary struct llist_node * + * @_l: address of llist_node pointer from lwq_dequeue_all() + * @_member: member in _n where lwq_node is found. + * + * Iterate over members in a dequeued list. If the iterator variable + * is set to NULL, the iterator removes that entry from the queue. + */ +#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \ + for (_t1 = (_l); \ + *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\ + _t2 = ((*_t1)->next), \ + true) \ + : false; \ + (_n) ? (_t1 = &(_n)->_member.node.next, 0) \ + : ((*(_t1) = (_t2)), 0)) + +/** + * lwq_enqueue - add a new item to the end of the queue + * @n: the lwq_node embedded in the item to be added + * @q: the lwq to append to.
+ * + * No locking is needed to append to the queue so this can + * be called from any context. + * Return %true if the list may have previously been empty. + */ +static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q) +{ + /* acquire ensures ordering wrt lwq_dequeue */ + return llist_add(&n->node, &q->new) && + smp_load_acquire(&q->ready) == NULL; +} + +/** + * lwq_enqueue_batch - add a list of new items to the end of the queue + * @n: the lwq_node embedded in the first item to be added + * @q: the lwq to append to. + * + * No locking is needed to append to the queue so this can + * be called from any context. + * Return %true if the list may have previously been empty. + */ +static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q) +{ + struct llist_node *e = n; + + /* acquire ensures ordering wrt lwq_dequeue */ + return llist_add_batch(llist_reverse_order(n), e, &q->new) && + smp_load_acquire(&q->ready) == NULL; +} +#endif /* LWQ_H */ diff --git a/include/linux/maple.h b/include/linux/maple.h index 9b140272ee16..9aae44efcfd4 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -5,7 +5,6 @@ #include <mach/maple.h> struct device; -extern struct bus_type maple_bus_type; /* Maple Bus command and response codes */ enum maple_code { diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index e41c70ac7744..b3d63123b945 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -256,6 +256,8 @@ struct maple_tree { struct maple_tree name = MTREE_INIT(name, 0) #define mtree_lock(mt) spin_lock((&(mt)->ma_lock)) +#define mtree_lock_nested(mt, subclass) \ + spin_lock_nested((&(mt)->ma_lock), subclass) #define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock)) /* @@ -327,6 +329,9 @@ int mtree_store(struct maple_tree *mt, unsigned long index, void *entry, gfp_t gfp); void *mtree_erase(struct maple_tree *mt, unsigned long index); +int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp); +int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp); + void mtree_destroy(struct maple_tree *mt); void __mt_destroy(struct maple_tree *mt); @@ -345,6 +350,36 @@ static inline bool mtree_empty(const struct maple_tree *mt) /* Advanced API */ /* + * Maple State Status + * ma_active means the maple state is pointing to a node and offset and can + * continue operating on the tree. + * ma_start means we have not searched the tree. + * ma_root means we have searched the tree and the entry we found lives in + * the root of the tree (ie it has index 0, length 1 and is the only entry in + * the tree). + * ma_none means we have searched the tree and there is no node in the + * tree for this entry. For example, we searched for index 1 in an empty + * tree. Or we have a tree which points to a full leaf node and we + * searched for an entry which is larger than can be contained in that + * leaf node. + * ma_pause means the data within the maple state may be stale, restart the + * operation + * ma_overflow means the search has reached the upper limit of the search + * ma_underflow means the search has reached the lower limit of the search + * ma_error means there was an error, check the node for the error number. + */ +enum maple_status { + ma_active, + ma_start, + ma_root, + ma_none, + ma_pause, + ma_overflow, + ma_underflow, + ma_error, +}; + +/* + * The maple state is defined in the struct ma_state and is used to keep track + * of information during operations, and even between operations when using the + * advanced API.
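
Putting the lwq pieces together: a plausible producer/consumer pairing under the stated contract (lock-free enqueue from any context, dequeue from process context with the internal lock). All names here are illustrative.

```c
struct example_req {
	struct lwq_node wqnode;
	int payload;
};

static struct lwq example_queue;		/* lwq_init(&example_queue) at setup */
static struct task_struct *example_worker;	/* dedicated handler thread */

/* Producer: safe from BH or IRQ context, no locking required. */
static void example_queue_req(struct example_req *req)
{
	if (lwq_enqueue(&req->wqnode, &example_queue))
		wake_up_process(example_worker);	/* queue was likely empty */
}

/* Consumer: process context, one item at a time. */
static void example_handle_pending(void)
{
	struct example_req *req;

	while ((req = lwq_dequeue(&example_queue, struct example_req, wqnode)))
		;	/* handle req->payload, then free or recycle req */
}
```
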
@@ -376,6 +411,13 @@ static inline bool mtree_empty(const struct maple_tree *mt) * When returning a value the maple state index and last respectively contain * the start and end of the range for the entry. Ranges are inclusive in the * Maple Tree. + * + * The status of the state is used to determine how the next action should treat + * the state. For instance, if the status is ma_start then the next action + * should start at the root of the tree and walk down. If the status is + * ma_pause then the node may be stale data and should be discarded. If the + * status is ma_overflow, then the last action hit the upper limit. + * */ struct ma_state { struct maple_tree *tree; /* The tree we're operating in */ @@ -385,9 +427,11 @@ struct ma_state { unsigned long min; /* The minimum index of this node - implied pivot min */ unsigned long max; /* The maximum index of this node - implied pivot max */ struct maple_alloc *alloc; /* Allocated nodes for this operation */ + enum maple_status status; /* The status of the state (active, start, none, etc) */ unsigned char depth; /* depth of tree descent during write */ unsigned char offset; unsigned char mas_flags; + unsigned char end; /* The end of the node */ }; struct ma_wr_state { @@ -397,7 +441,6 @@ struct ma_wr_state { unsigned long r_max; /* range max */ enum maple_type type; /* mas->node type */ unsigned char offset_end; /* The offset where the write ends */ - unsigned char node_end; /* mas->node end */ unsigned long *pivots; /* mas->node->pivots pointer */ unsigned long end_piv; /* The pivot at the offset end */ void __rcu **slots; /* mas->node->slots pointer */ @@ -406,28 +449,16 @@ struct ma_wr_state { }; #define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock)) +#define mas_lock_nested(mas, subclass) \ + spin_lock_nested(&((mas)->tree->ma_lock), subclass) #define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock)) - /* * Special values for ma_state.node. - * MAS_START means we have not searched the tree. - * MAS_ROOT means we have searched the tree and the entry we found lives in - * the root of the tree (ie it has index 0, length 1 and is the only entry in - * the tree). - * MAS_NONE means we have searched the tree and there is no node in the - * tree for this entry. For example, we searched for index 1 in an empty - * tree. Or we have a tree which points to a full leaf node and we - * searched for an entry which is larger than can be contained in that - * leaf node. * MA_ERROR represents an errno. After dropping the lock and attempting * to resolve the error, the walk would have to be restarted from the * top of the tree as the tree may have been modified. 
*/ -#define MAS_START ((struct maple_enode *)1UL) -#define MAS_ROOT ((struct maple_enode *)5UL) -#define MAS_NONE ((struct maple_enode *)9UL) -#define MAS_PAUSE ((struct maple_enode *)17UL) #define MA_ERROR(err) \ ((struct maple_enode *)(((unsigned long)err << 2) | 2UL)) @@ -436,7 +467,8 @@ struct ma_wr_state { .tree = mt, \ .index = first, \ .last = end, \ - .node = MAS_START, \ + .node = NULL, \ + .status = ma_start, \ .min = 0, \ .max = ULONG_MAX, \ .alloc = NULL, \ @@ -467,7 +499,6 @@ void *mas_find_range(struct ma_state *mas, unsigned long max); void *mas_find_rev(struct ma_state *mas, unsigned long min); void *mas_find_range_rev(struct ma_state *mas, unsigned long max); int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp); -bool mas_is_err(struct ma_state *mas); bool mas_nomem(struct ma_state *mas, gfp_t gfp); void mas_pause(struct ma_state *mas); @@ -496,19 +527,18 @@ static inline void mas_init(struct ma_state *mas, struct maple_tree *tree, mas->tree = tree; mas->index = mas->last = addr; mas->max = ULONG_MAX; - mas->node = MAS_START; + mas->status = ma_start; + mas->node = NULL; } -/* Checks if a mas has not found anything */ -static inline bool mas_is_none(const struct ma_state *mas) +static inline bool mas_is_active(struct ma_state *mas) { - return mas->node == MAS_NONE; + return mas->status == ma_active; } -/* Checks if a mas has been paused */ -static inline bool mas_is_paused(const struct ma_state *mas) +static inline bool mas_is_err(struct ma_state *mas) { - return mas->node == MAS_PAUSE; + return mas->status == ma_error; } /** @@ -521,9 +551,10 @@ static inline bool mas_is_paused(const struct ma_state *mas) * * Context: Any context. */ -static inline void mas_reset(struct ma_state *mas) +static __always_inline void mas_reset(struct ma_state *mas) { - mas->node = MAS_START; + mas->status = ma_start; + mas->node = NULL; } /** @@ -539,6 +570,131 @@ static inline void mas_reset(struct ma_state *mas) */ #define mas_for_each(__mas, __entry, __max) \ while (((__entry) = mas_find((__mas), (__max))) != NULL) + +#ifdef CONFIG_DEBUG_MAPLE_TREE +enum mt_dump_format { + mt_dump_dec, + mt_dump_hex, +}; + +extern atomic_t maple_tree_tests_run; +extern atomic_t maple_tree_tests_passed; + +void mt_dump(const struct maple_tree *mt, enum mt_dump_format format); +void mas_dump(const struct ma_state *mas); +void mas_wr_dump(const struct ma_wr_state *wr_mas); +void mt_validate(struct maple_tree *mt); +void mt_cache_shrink(void); +#define MT_BUG_ON(__tree, __x) do { \ + atomic_inc(&maple_tree_tests_run); \ + if (__x) { \ + pr_info("BUG at %s:%d (%u)\n", \ + __func__, __LINE__, __x); \ + mt_dump(__tree, mt_dump_hex); \ + pr_info("Pass: %u Run:%u\n", \ + atomic_read(&maple_tree_tests_passed), \ + atomic_read(&maple_tree_tests_run)); \ + dump_stack(); \ + } else { \ + atomic_inc(&maple_tree_tests_passed); \ + } \ +} while (0) + +#define MAS_BUG_ON(__mas, __x) do { \ + atomic_inc(&maple_tree_tests_run); \ + if (__x) { \ + pr_info("BUG at %s:%d (%u)\n", \ + __func__, __LINE__, __x); \ + mas_dump(__mas); \ + mt_dump((__mas)->tree, mt_dump_hex); \ + pr_info("Pass: %u Run:%u\n", \ + atomic_read(&maple_tree_tests_passed), \ + atomic_read(&maple_tree_tests_run)); \ + dump_stack(); \ + } else { \ + atomic_inc(&maple_tree_tests_passed); \ + } \ +} while (0) + +#define MAS_WR_BUG_ON(__wrmas, __x) do { \ + atomic_inc(&maple_tree_tests_run); \ + if (__x) { \ + pr_info("BUG at %s:%d (%u)\n", \ + __func__, __LINE__, __x); \ + mas_wr_dump(__wrmas); \ + mas_dump((__wrmas)->mas); \ + 
mt_dump((__wrmas)->mas->tree, mt_dump_hex); \ + pr_info("Pass: %u Run:%u\n", \ + atomic_read(&maple_tree_tests_passed), \ + atomic_read(&maple_tree_tests_run)); \ + dump_stack(); \ + } else { \ + atomic_inc(&maple_tree_tests_passed); \ + } \ +} while (0) + +#define MT_WARN_ON(__tree, __x) ({ \ + int ret = !!(__x); \ + atomic_inc(&maple_tree_tests_run); \ + if (ret) { \ + pr_info("WARN at %s:%d (%u)\n", \ + __func__, __LINE__, __x); \ + mt_dump(__tree, mt_dump_hex); \ + pr_info("Pass: %u Run:%u\n", \ + atomic_read(&maple_tree_tests_passed), \ + atomic_read(&maple_tree_tests_run)); \ + dump_stack(); \ + } else { \ + atomic_inc(&maple_tree_tests_passed); \ + } \ + unlikely(ret); \ +}) + +#define MAS_WARN_ON(__mas, __x) ({ \ + int ret = !!(__x); \ + atomic_inc(&maple_tree_tests_run); \ + if (ret) { \ + pr_info("WARN at %s:%d (%u)\n", \ + __func__, __LINE__, __x); \ + mas_dump(__mas); \ + mt_dump((__mas)->tree, mt_dump_hex); \ + pr_info("Pass: %u Run:%u\n", \ + atomic_read(&maple_tree_tests_passed), \ + atomic_read(&maple_tree_tests_run)); \ + dump_stack(); \ + } else { \ + atomic_inc(&maple_tree_tests_passed); \ + } \ + unlikely(ret); \ +}) + +#define MAS_WR_WARN_ON(__wrmas, __x) ({ \ + int ret = !!(__x); \ + atomic_inc(&maple_tree_tests_run); \ + if (ret) { \ + pr_info("WARN at %s:%d (%u)\n", \ + __func__, __LINE__, __x); \ + mas_wr_dump(__wrmas); \ + mas_dump((__wrmas)->mas); \ + mt_dump((__wrmas)->mas->tree, mt_dump_hex); \ + pr_info("Pass: %u Run:%u\n", \ + atomic_read(&maple_tree_tests_passed), \ + atomic_read(&maple_tree_tests_run)); \ + dump_stack(); \ + } else { \ + atomic_inc(&maple_tree_tests_passed); \ + } \ + unlikely(ret); \ +}) +#else +#define MT_BUG_ON(__tree, __x) BUG_ON(__x) +#define MAS_BUG_ON(__mas, __x) BUG_ON(__x) +#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x) +#define MT_WARN_ON(__tree, __x) WARN_ON(__x) +#define MAS_WARN_ON(__mas, __x) WARN_ON(__x) +#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x) +#endif /* CONFIG_DEBUG_MAPLE_TREE */ + /** * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the * current location. 
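
A brief sketch of the status-based maple state in use: MA_STATE() now leaves the state with status == ma_start and a NULL node, and iteration then proceeds as before. This is an illustrative RCU read-side lookup, not code from the patch.

```c
static void *example_first_in_range(struct maple_tree *mt,
				    unsigned long first, unsigned long last)
{
	MA_STATE(mas, mt, first, last);		/* status starts as ma_start */
	void *entry = NULL;

	rcu_read_lock();
	mas_for_each(&mas, entry, last) {
		/* mas.index and mas.last now bound this entry's range */
		break;
	}
	rcu_read_unlock();

	return entry;
}
```
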
@@ -552,6 +708,9 @@ static inline void mas_reset(struct ma_state *mas) static inline void __mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last) { + /* Ensure the range starts within the current slot */ + MAS_WARN_ON(mas, mas_is_active(mas) && + (mas->index > start || mas->last < start)); mas->index = start; mas->last = last; } @@ -569,8 +728,8 @@ static inline void __mas_set_range(struct ma_state *mas, unsigned long start, static inline void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last) { + mas_reset(mas); __mas_set_range(mas, start, last); - mas->node = MAS_START; } /** @@ -695,129 +854,4 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max); for (__entry = mt_find(__tree, &(__index), __max); \ __entry; __entry = mt_find_after(__tree, &(__index), __max)) - -#ifdef CONFIG_DEBUG_MAPLE_TREE -enum mt_dump_format { - mt_dump_dec, - mt_dump_hex, -}; - -extern atomic_t maple_tree_tests_run; -extern atomic_t maple_tree_tests_passed; - -void mt_dump(const struct maple_tree *mt, enum mt_dump_format format); -void mas_dump(const struct ma_state *mas); -void mas_wr_dump(const struct ma_wr_state *wr_mas); -void mt_validate(struct maple_tree *mt); -void mt_cache_shrink(void); -#define MT_BUG_ON(__tree, __x) do { \ - atomic_inc(&maple_tree_tests_run); \ - if (__x) { \ - pr_info("BUG at %s:%d (%u)\n", \ - __func__, __LINE__, __x); \ - mt_dump(__tree, mt_dump_hex); \ - pr_info("Pass: %u Run:%u\n", \ - atomic_read(&maple_tree_tests_passed), \ - atomic_read(&maple_tree_tests_run)); \ - dump_stack(); \ - } else { \ - atomic_inc(&maple_tree_tests_passed); \ - } \ -} while (0) - -#define MAS_BUG_ON(__mas, __x) do { \ - atomic_inc(&maple_tree_tests_run); \ - if (__x) { \ - pr_info("BUG at %s:%d (%u)\n", \ - __func__, __LINE__, __x); \ - mas_dump(__mas); \ - mt_dump((__mas)->tree, mt_dump_hex); \ - pr_info("Pass: %u Run:%u\n", \ - atomic_read(&maple_tree_tests_passed), \ - atomic_read(&maple_tree_tests_run)); \ - dump_stack(); \ - } else { \ - atomic_inc(&maple_tree_tests_passed); \ - } \ -} while (0) - -#define MAS_WR_BUG_ON(__wrmas, __x) do { \ - atomic_inc(&maple_tree_tests_run); \ - if (__x) { \ - pr_info("BUG at %s:%d (%u)\n", \ - __func__, __LINE__, __x); \ - mas_wr_dump(__wrmas); \ - mas_dump((__wrmas)->mas); \ - mt_dump((__wrmas)->mas->tree, mt_dump_hex); \ - pr_info("Pass: %u Run:%u\n", \ - atomic_read(&maple_tree_tests_passed), \ - atomic_read(&maple_tree_tests_run)); \ - dump_stack(); \ - } else { \ - atomic_inc(&maple_tree_tests_passed); \ - } \ -} while (0) - -#define MT_WARN_ON(__tree, __x) ({ \ - int ret = !!(__x); \ - atomic_inc(&maple_tree_tests_run); \ - if (ret) { \ - pr_info("WARN at %s:%d (%u)\n", \ - __func__, __LINE__, __x); \ - mt_dump(__tree, mt_dump_hex); \ - pr_info("Pass: %u Run:%u\n", \ - atomic_read(&maple_tree_tests_passed), \ - atomic_read(&maple_tree_tests_run)); \ - dump_stack(); \ - } else { \ - atomic_inc(&maple_tree_tests_passed); \ - } \ - unlikely(ret); \ -}) - -#define MAS_WARN_ON(__mas, __x) ({ \ - int ret = !!(__x); \ - atomic_inc(&maple_tree_tests_run); \ - if (ret) { \ - pr_info("WARN at %s:%d (%u)\n", \ - __func__, __LINE__, __x); \ - mas_dump(__mas); \ - mt_dump((__mas)->tree, mt_dump_hex); \ - pr_info("Pass: %u Run:%u\n", \ - atomic_read(&maple_tree_tests_passed), \ - atomic_read(&maple_tree_tests_run)); \ - dump_stack(); \ - } else { \ - atomic_inc(&maple_tree_tests_passed); \ - } \ - unlikely(ret); \ -}) - -#define MAS_WR_WARN_ON(__wrmas, __x) ({ \ - int ret = !!(__x); \ - 
atomic_inc(&maple_tree_tests_run); \ - if (ret) { \ - pr_info("WARN at %s:%d (%u)\n", \ - __func__, __LINE__, __x); \ - mas_wr_dump(__wrmas); \ - mas_dump((__wrmas)->mas); \ - mt_dump((__wrmas)->mas->tree, mt_dump_hex); \ - pr_info("Pass: %u Run:%u\n", \ - atomic_read(&maple_tree_tests_passed), \ - atomic_read(&maple_tree_tests_run)); \ - dump_stack(); \ - } else { \ - atomic_inc(&maple_tree_tests_passed); \ - } \ - unlikely(ret); \ -}) -#else -#define MT_BUG_ON(__tree, __x) BUG_ON(__x) -#define MAS_BUG_ON(__mas, __x) BUG_ON(__x) -#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x) -#define MT_WARN_ON(__tree, __x) WARN_ON(__x) -#define MAS_WARN_ON(__mas, __x) WARN_ON(__x) -#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x) -#endif /* CONFIG_DEBUG_MAPLE_TREE */ - #endif /*_LINUX_MAPLE_TREE_H */ diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index b0da04fe087b..34dfcc77f505 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -126,10 +126,11 @@ struct cmos_rtc_board_info { #endif /* ARCH_RTC_LOCATION */ bool mc146818_does_rtc_work(void); -int mc146818_get_time(struct rtc_time *time); +int mc146818_get_time(struct rtc_time *time, int timeout); int mc146818_set_time(struct rtc_time *time); bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param), + int timeout, void *param); #endif /* _MC146818RTC_H */ diff --git a/include/linux/mcb.h b/include/linux/mcb.h index 1e5893138afe..0b971b24a804 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h @@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev) struct mcb_device { struct device dev; struct mcb_bus *bus; - bool is_added; struct mcb_driver *driver; u16 id; int inst; diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 8fa23bdcedbf..79ceee3c8673 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -38,6 +38,7 @@ struct mdio_device { /* Bus address of the MDIO device (0-31) */ int addr; int flags; + int reset_state; struct gpio_desc *reset_gpio; struct reset_control *reset_ctrl; unsigned int reset_assert_delay; @@ -420,7 +421,7 @@ static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising) * A function that translates value of following registers to the linkmode: * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20) * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60) - * IEEE 802.3-2018 45.2.7.14 "EEE "link partner ability 1 register (7.61) + * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61) */ static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val) { diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index fd6e0620658d..b38a56a13f39 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -31,11 +31,11 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); * @rx_work: async work to execute Rx event callback * @rx_cb: Drivers register this callback to get asynchronous ME * Rx buffer pending notifications. - * @notif_work: async work to execute FW notif event callback + * @notif_work: async work to execute FW notify event callback * @notif_cb: Drivers register this callback to get asynchronous ME * FW notification pending notifications. 
* - * @do_match: wheather device can be matched with a driver + * @do_match: whether the device can be matched with a driver * @is_added: device is already scanned * @priv_data: client private data */ @@ -94,15 +94,23 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length); +ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, + size_t length, unsigned long timeout); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length, + unsigned long timeout); ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, size_t length, u8 vtag); +ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf, + size_t length, u8 vtag, unsigned long timeout); ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, u8 *vtag); ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, u8 *vtag); +ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 *vtag, unsigned long timeout); int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 1c1072e3ca06..b695f9e946da 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -40,6 +40,8 @@ extern unsigned long long max_possible_pfn; * via a driver, and never indicated in the firmware-provided memory map as * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the * kernel resource tree. + * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are + * not initialized (only for reserved regions). 
*/ enum memblock_flags { MEMBLOCK_NONE = 0x0, /* No special request */ @@ -47,6 +49,7 @@ MEMBLOCK_MIRROR = 0x2, /* mirrored region */ MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */ + MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */ }; /** @@ -120,11 +123,13 @@ int memblock_physmem_add(phys_addr_t base, phys_addr_t size); void memblock_trim_memory(phys_addr_t align); bool memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); +bool memblock_validate_numa_coverage(unsigned long threshold_bytes); int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); +int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size); void memblock_free_all(void); void memblock_free(void *ptr, size_t size); @@ -259,6 +264,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m) return m->flags & MEMBLOCK_NOMAP; } +static inline bool memblock_is_reserved_noinit(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_RSRV_NOINIT; +} + static inline bool memblock_is_driver_managed(struct memblock_region *m) { return m->flags & MEMBLOCK_DRIVER_MANAGED; diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ab94ad4597d0..20ff87f8e001 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -21,6 +21,7 @@ #include <linux/vmstat.h> #include <linux/writeback.h> #include <linux/page-flags.h> +#include <linux/shrinker.h> struct mem_cgroup; struct obj_cgroup; @@ -88,17 +89,6 @@ struct mem_cgroup_reclaim_iter { unsigned int generation; }; -/* - * Bitmap and deferred work of shrinker::id corresponding to memcg-aware - * shrinkers, which have elements charged to this memcg. - */ -struct shrinker_info { - struct rcu_head rcu; - atomic_long_t *nr_deferred; - unsigned long *map; - int map_nr_max; -}; - struct lruvec_stats_percpu { /* Local (CPU and cgroup) state */ long state[NR_VM_NODE_STAT_ITEMS]; @@ -153,7 +143,7 @@ struct mem_cgroup_threshold_ary { /* Size of entries[] */ unsigned int size; /* Array of thresholds */ - struct mem_cgroup_threshold entries[]; + struct mem_cgroup_threshold entries[] __counted_by(size); }; struct mem_cgroup_thresholds { @@ -229,6 +219,12 @@ struct mem_cgroup { #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) unsigned long zswap_max; + + /* + * Prevent pages from this memcg from being written back from zswap to + * swap, and from being swapped out on zswap store failures. + */ + bool zswap_writeback; #endif unsigned long soft_limit; @@ -299,7 +295,13 @@ struct mem_cgroup { #ifdef CONFIG_MEMCG_KMEM int kmemcg_id; - struct obj_cgroup __rcu *objcg; + /* + * memcg->objcg is wiped out as a part of the objcg reparenting + * process. memcg->orig_objcg preserves a pointer (and a reference) + * to the original objcg until the end of the memcg's life.
+ */ + struct obj_cgroup __rcu *objcg; + struct obj_cgroup *orig_objcg; /* list of inherited objcgs, protected by objcg_lock */ struct list_head objcg_list; #endif @@ -328,7 +330,7 @@ struct mem_cgroup { struct deferred_split deferred_split_queue; #endif -#ifdef CONFIG_LRU_GEN +#ifdef CONFIG_LRU_GEN_WALKS_MMU /* per-memcg mm_struct list */ struct lru_gen_mm_list mm_list; #endif @@ -662,6 +664,8 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target, page_counter_read(&memcg->memory); } +void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg); + int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp); /** @@ -686,6 +690,9 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, return __mem_cgroup_charge(folio, mm, gfp); } +int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, + long nr_pages); + int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); @@ -713,6 +720,10 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) __mem_cgroup_uncharge_list(page_list); } +void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages); + +void mem_cgroup_replace_folio(struct folio *old, struct folio *new); + void mem_cgroup_migrate(struct folio *old, struct folio *new); /** @@ -769,6 +780,8 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); +struct mem_cgroup *get_mem_cgroup_from_current(void); + struct lruvec *folio_lruvec_lock(struct folio *folio); struct lruvec *folio_lruvec_lock_irq(struct folio *folio); struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, @@ -814,6 +827,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg) return !memcg || css_tryget(&memcg->css); } +static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg) +{ + return !memcg || css_tryget_online(&memcg->css); +} + static inline void mem_cgroup_put(struct mem_cgroup *memcg) { if (memcg) @@ -920,7 +938,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, return READ_ONCE(mz->lru_zone_size[zone_idx][lru]); } -void mem_cgroup_handle_over_high(void); +void mem_cgroup_handle_over_high(gfp_t gfp_mask); unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); @@ -1039,8 +1057,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return x; } -void mem_cgroup_flush_stats(void); -void mem_cgroup_flush_stats_ratelimited(void); +void mem_cgroup_flush_stats(struct mem_cgroup *memcg); +void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg); void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); @@ -1080,15 +1098,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg, local_irq_restore(flags); } -static inline void count_memcg_page_event(struct page *page, - enum vm_event_item idx) -{ - struct mem_cgroup *memcg = page_memcg(page); - - if (memcg) - count_memcg_events(memcg, idx, 1); -} - static inline void count_memcg_folio_events(struct folio *folio, enum vm_event_item idx, unsigned long nr) { @@ -1189,6 +1198,11 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page) return NULL; } +static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) +{ + return NULL; +} + static inline bool folio_memcg_kmem(struct folio *folio) { return false; @@ 
-1249,12 +1263,23 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target, return false; } +static inline void mem_cgroup_commit_charge(struct folio *folio, + struct mem_cgroup *memcg) +{ +} + static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) { return 0; } +static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, + gfp_t gfp, long nr_pages) +{ + return 0; +} + static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) { @@ -1273,6 +1298,16 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) { } +static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, + unsigned int nr_pages) +{ +} + +static inline void mem_cgroup_replace_folio(struct folio *old, + struct folio *new) +{ +} + static inline void mem_cgroup_migrate(struct folio *old, struct folio *new) { } @@ -1310,6 +1345,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) return NULL; } +static inline struct mem_cgroup *get_mem_cgroup_from_current(void) +{ + return NULL; +} + static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) { @@ -1325,6 +1365,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg) return true; } +static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg) +{ + return true; +} + static inline void mem_cgroup_put(struct mem_cgroup *memcg) { } @@ -1458,7 +1503,7 @@ static inline void mem_cgroup_unlock_pages(void) rcu_read_unlock(); } -static inline void mem_cgroup_handle_over_high(void) +static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) { } @@ -1524,11 +1569,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return node_page_state(lruvec_pgdat(lruvec), idx); } -static inline void mem_cgroup_flush_stats(void) +static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg) { } -static inline void mem_cgroup_flush_stats_ratelimited(void) +static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) { } @@ -1565,11 +1610,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg, { } -static inline void count_memcg_page_event(struct page *page, - int idx) -{ -} - static inline void count_memcg_folio_events(struct folio *folio, enum vm_event_item idx, unsigned long nr) { @@ -1763,9 +1803,27 @@ bool mem_cgroup_kmem_disabled(void); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); -struct obj_cgroup *get_obj_cgroup_from_current(void); +/* + * The returned objcg pointer is safe to use without additional + * protection within a scope. The scope is defined either by + * the current task (similar to the "current" global variable) + * or by a set_active_memcg() pair. + * Please use obj_cgroup_get() to get a reference if the pointer + * needs to be used outside of the local scope.
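A short usage sketch of the scope rule spelled out above, mirroring the get_obj_cgroup_from_current() inline that follows; the helper name is hypothetical, while set_active_memcg() and obj_cgroup_get() are existing kernel APIs:

	#include <linux/memcontrol.h>
	#include <linux/sched/mm.h>

	/* Hypothetical helper: pin the objcg of a specific memcg. */
	static struct obj_cgroup *pin_objcg_of(struct mem_cgroup *memcg)
	{
		struct mem_cgroup *old = set_active_memcg(memcg);
		struct obj_cgroup *objcg = current_obj_cgroup();

		if (objcg)
			obj_cgroup_get(objcg);	/* pointer escapes the scope */
		set_active_memcg(old);

		return objcg;
	}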
+ */ +struct obj_cgroup *current_obj_cgroup(void); struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio); +static inline struct obj_cgroup *get_obj_cgroup_from_current(void) +{ + struct obj_cgroup *objcg = current_obj_cgroup(); + + if (objcg) + obj_cgroup_get(objcg); + + return objcg; +} + int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); @@ -1889,6 +1947,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg, bool obj_cgroup_may_zswap(struct obj_cgroup *objcg); void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size); void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size); +bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg); #else static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) { @@ -1902,6 +1961,11 @@ static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) { } +static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) +{ + /* if zswap is disabled, do not block pages going to the swapping device */ + return true; +} #endif #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h index 437441cdf78f..69e781900082 100644 --- a/include/linux/memory-tiers.h +++ b/include/linux/memory-tiers.h @@ -6,6 +6,7 @@ #include <linux/nodemask.h> #include <linux/kref.h> #include <linux/mmzone.h> +#include <linux/notifier.h> /* * Each tier covers an abstract distance chunk size of 128 */ @@ -22,7 +23,9 @@ struct memory_tier; struct memory_dev_type { /* list of memory types that are part of same tier as this type */ - struct list_head tier_sibiling; + struct list_head tier_sibling; + /* list of memory types that are managed by one driver */ + struct list_head list; /* abstract distance for this specific memory type */ int adistance; /* Nodes of same abstract distance */ @@ -30,12 +33,21 @@ struct memory_dev_type { struct kref kref; }; +struct access_coordinate; + #ifdef CONFIG_NUMA extern bool numa_demotion_enabled; +extern struct memory_dev_type *default_dram_type; struct memory_dev_type *alloc_memory_type(int adistance); void put_memory_type(struct memory_dev_type *memtype); void init_node_memory_type(int node, struct memory_dev_type *default_type); void clear_node_memory_type(int node, struct memory_dev_type *memtype); +int register_mt_adistance_algorithm(struct notifier_block *nb); +int unregister_mt_adistance_algorithm(struct notifier_block *nb); +int mt_calc_adistance(int node, int *adist); +int mt_set_default_dram_perf(int nid, struct access_coordinate *perf, + const char *source); +int mt_perf_to_adistance(struct access_coordinate *perf, int *adist); #ifdef CONFIG_MIGRATION int next_demotion_node(int node); void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets); @@ -60,6 +72,7 @@ static inline bool node_is_toptier(int node) #else #define numa_demotion_enabled false +#define default_dram_type NULL /* * The CONFIG_NUMA implementation returns a non-NULL error.
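For the abstract-distance notifier chain declared above, a hedged sketch of a provider: mt_calc_adistance() passes the node id as the notifier action and a pointer to the adistance value as data. The driver name, the ownership check, and the chosen distance are hypothetical; MEMTIER_ADISTANCE_DRAM is an existing memory-tiers.h constant.

	#include <linux/memory-tiers.h>
	#include <linux/notifier.h>

	static int mydrv_adist_cb(struct notifier_block *nb,
				  unsigned long nid, void *data)
	{
		int *adist = data;

		if (!mydrv_owns_node(nid))	/* hypothetical check */
			return NOTIFY_OK;

		*adist = MEMTIER_ADISTANCE_DRAM * 2;	/* slower than DRAM */
		return NOTIFY_STOP;
	}

	static struct notifier_block mydrv_adist_nb = {
		.notifier_call = mydrv_adist_cb,
	};

	/* at probe time: register_mt_adistance_algorithm(&mydrv_adist_nb); */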
*/ @@ -97,5 +110,31 @@ static inline bool node_is_toptier(int node) { return true; } + +static inline int register_mt_adistance_algorithm(struct notifier_block *nb) +{ + return 0; +} + +static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb) +{ + return 0; +} + +static inline int mt_calc_adistance(int node, int *adist) +{ + return NOTIFY_DONE; +} + +static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf, + const char *source) +{ + return -EIO; +} + +static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist) +{ + return -EIO; +} #endif /* CONFIG_NUMA */ #endif /* _LINUX_MEMORY_TIERS_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index d232de7cdc56..931b118336f4 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -17,6 +17,8 @@ struct mm_struct; +#define NO_INTERLEAVE_INDEX (-1UL) /* use task il_prev for interleaving */ + #ifdef CONFIG_NUMA /* @@ -89,8 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol) return pol; } -#define vma_policy(vma) ((vma)->vm_policy) - static inline void mpol_get(struct mempolicy *pol) { if (pol) @@ -107,35 +107,30 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) /* * Tree of shared policies for a shared memory region. - * Maintain the policies in a pseudo mm that contains vmas. The vmas - * carry the policy. As a special twist the pseudo mm is indexed in pages, not - * bytes, so that we can work with shared memory segments bigger than - * unsigned long. */ - -struct sp_node { - struct rb_node nd; - unsigned long start, end; - struct mempolicy *policy; -}; - struct shared_policy { struct rb_root root; rwlock_t lock; }; +struct sp_node { + struct rb_node nd; + pgoff_t start, end; + struct mempolicy *policy; +}; int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst); void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); -int mpol_set_shared_policy(struct shared_policy *info, - struct vm_area_struct *vma, - struct mempolicy *new); -void mpol_free_shared_policy(struct shared_policy *p); +int mpol_set_shared_policy(struct shared_policy *sp, + struct vm_area_struct *vma, struct mempolicy *mpol); +void mpol_free_shared_policy(struct shared_policy *sp); struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, - unsigned long idx); + pgoff_t idx); struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, - unsigned long addr); + unsigned long addr, pgoff_t *ilx); +struct mempolicy *get_vma_policy(struct vm_area_struct *vma, + unsigned long addr, int order, pgoff_t *ilx); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); @@ -149,8 +144,6 @@ extern int huge_node(struct vm_area_struct *vma, extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern bool mempolicy_in_oom_domain(struct task_struct *tsk, const nodemask_t *mask); -extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy); - extern unsigned int mempolicy_slab_node(void); extern enum zone_type policy_zone; @@ -174,7 +167,7 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ extern bool vma_migratable(struct vm_area_struct *vma); -extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); +int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long); extern void 
mpol_put_task_policy(struct task_struct *); static inline bool mpol_is_preferred_many(struct mempolicy *pol) @@ -188,12 +181,17 @@ extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone); struct mempolicy {}; +static inline struct mempolicy *get_task_policy(struct task_struct *p) +{ + return NULL; +} + static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) { return true; } -static inline void mpol_put(struct mempolicy *p) +static inline void mpol_put(struct mempolicy *pol) { } @@ -212,17 +210,22 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp, { } -static inline void mpol_free_shared_policy(struct shared_policy *p) +static inline void mpol_free_shared_policy(struct shared_policy *sp) { } static inline struct mempolicy * -mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) +mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx) { return NULL; } -#define vma_policy(vma) NULL +static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma, + unsigned long addr, int order, pgoff_t *ilx) +{ + *ilx = 0; + return NULL; +} static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) @@ -278,7 +281,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol) } #endif -static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, +static inline int mpol_misplaced(struct folio *folio, + struct vm_area_struct *vma, unsigned long address) { return -1; /* no node preference */ diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 4aae6c06c5f2..7be1e32e6d42 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -51,6 +51,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, extern int mempool_resize(mempool_t *pool, int new_min_nr); extern void mempool_destroy(mempool_t *pool); extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; +extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc; extern void mempool_free(void *element, mempool_t *pool); /* diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1314d9c5f05b..744c830f4b13 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -196,8 +196,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap); bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); -unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); -void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); unsigned long memremap_compat_align(void); #else static inline void *devm_memremap_pages(struct device *dev, @@ -228,16 +226,6 @@ static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) return false; } -static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) -{ - return 0; -} - -static inline void vmem_altmap_free(struct vmem_altmap *altmap, - unsigned long nr_pfns) -{ -} - /* when memremap_pages() is disabled all archs can remap a single page */ static inline unsigned long memremap_compat_align(void) { diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h index 473545a2c425..6fa21791fc85 100644 --- a/include/linux/mfd/88pm860x.h +++ b/include/linux/mfd/88pm860x.h @@ -472,13 +472,7 @@ extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *); extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *); extern int pm860x_set_bits(struct i2c_client *, int, 
unsigned char, unsigned char); -extern int pm860x_page_reg_read(struct i2c_client *, int); extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char); extern int pm860x_page_bulk_read(struct i2c_client *, int, int, unsigned char *); -extern int pm860x_page_bulk_write(struct i2c_client *, int, int, - unsigned char *); -extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char, - unsigned char); - #endif /* __LINUX_MFD_88PM860X_H */ diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h index 302a330c5c84..76d326ea8eba 100644 --- a/include/linux/mfd/abx500/ab8500.h +++ b/include/linux/mfd/abx500/ab8500.h @@ -382,10 +382,6 @@ struct ab8500_platform_data { struct ab8500_sysctrl_platform_data *sysctrl; }; -extern int ab8500_init(struct ab8500 *ab8500, - enum ab8500_version version); -extern int ab8500_exit(struct ab8500 *ab8500); - extern int ab8500_suspend(struct ab8500 *ab8500); static inline int is_ab8500(struct ab8500 *ab) @@ -503,13 +499,7 @@ static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab) void ab8500_override_turn_on_stat(u8 mask, u8 set); -#ifdef CONFIG_AB8500_DEBUG -extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); -void ab8500_dump_all_banks(struct device *dev); -void ab8500_debug_register_interrupt(int line); -#else static inline void ab8500_dump_all_banks(struct device *dev) {} static inline void ab8500_debug_register_interrupt(int line) {} -#endif #endif /* MFD_AB8500_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 47e7a3a61ce6..e8bcad641d8c 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -92,7 +92,7 @@ struct mfd_cell { * (above) when matching OF nodes with devices that have identical * compatible strings */ - const u64 of_reg; + u64 of_reg; /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */ bool use_of_reg; diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index e7a7e70fdb38..dd0fc891b228 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -556,16 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits) #define PRCMU_QOS_ARM_OPP 3 #define PRCMU_QOS_DEFAULT_VALUE -1 -static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void) -{ - return 0; -} - -static inline int prcmu_qos_requirement(int prcmu_qos_class) -{ - return 0; -} - static inline int prcmu_qos_add_requirement(int prcmu_qos_class, char *name, s32 value) { @@ -582,15 +572,4 @@ static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name) { } -static inline int prcmu_qos_add_notifier(int prcmu_qos_class, - struct notifier_block *notifier) -{ - return 0; -} -static inline int prcmu_qos_remove_notifier(int prcmu_qos_class, - struct notifier_block *notifier) -{ - return 0; -} - #endif /* __MACH_PRCMU_H */ diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h index 6a012784dd1b..194556851ccf 100644 --- a/include/linux/mfd/hi655x-pmic.h +++ b/include/linux/mfd/hi655x-pmic.h @@ -52,7 +52,6 @@ #define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT) struct hi655x_pmic { - struct resource *res; struct device *dev; struct regmap *regmap; struct gpio_desc *gpio; diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h index ea4a4b1b246a..1fbda1f8967d 100644 --- a/include/linux/mfd/lpc_ich.h +++ b/include/linux/mfd/lpc_ich.h @@ -15,7 +15,7 @@ #define ICH_RES_GPE0 1 /* GPIO compatibility */ -enum { +enum lpc_gpio_versions { ICH_I3100_GPIO, ICH_V5_GPIO, 
ICH_V6_GPIO, @@ -26,11 +26,14 @@ enum { AVOTON_GPIO, }; +struct lpc_ich_gpio_info; + struct lpc_ich_info { char name[32]; unsigned int iTCO_version; - unsigned int gpio_version; + enum lpc_gpio_versions gpio_version; enum intel_spi_type spi_type; + const struct lpc_ich_gpio_info *gpio_info; u8 use_gpio; }; diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h index 3acceeedbaba..ea635d12a741 100644 --- a/include/linux/mfd/max77686-private.h +++ b/include/linux/mfd/max77686-private.h @@ -441,8 +441,4 @@ enum max77686_types { TYPE_MAX77802, }; -extern int max77686_irq_init(struct max77686_dev *max77686); -extern void max77686_irq_exit(struct max77686_dev *max77686); -extern int max77686_irq_resume(struct max77686_dev *max77686); - #endif /* __LINUX_MFD_MAX77686_PRIV_H */ diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 311f7d3d2323..54444ff2a5de 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -405,7 +405,7 @@ enum max77693_haptic_reg { MAX77693_HAPTIC_REG_END, }; -/* max77693-pmic LSCNFG configuraton register */ +/* max77693-pmic LSCNFG configuration register */ #define MAX77693_PMIC_LOW_SYS_MASK 0x80 #define MAX77693_PMIC_LOW_SYS_SHIFT 7 diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h index 0bc7454c4dbe..2fb4db67f110 100644 --- a/include/linux/mfd/max77843-private.h +++ b/include/linux/mfd/max77843-private.h @@ -198,7 +198,7 @@ enum max77843_irq_muic { #define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT) #define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT) -/* Max77843 charger insterrupts */ +/* Max77843 charger interrupts */ #define MAX77843_CHG_BYP_I BIT(0) #define MAX77843_CHG_BATP_I BIT(2) #define MAX77843_CHG_BAT_I BIT(3) diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h index 3d33517f178c..d83e87298ac4 100644 --- a/include/linux/mfd/mt6358/registers.h +++ b/include/linux/mfd/mt6358/registers.h @@ -262,6 +262,12 @@ #define MT6358_LDO_VBIF28_CON3 0x1db0 #define MT6358_VCAMA1_ANA_CON0 0x1e08 #define MT6358_VCAMA2_ANA_CON0 0x1e0c +#define MT6358_VFE28_ANA_CON0 0x1e10 +#define MT6358_VCN28_ANA_CON0 0x1e14 +#define MT6358_VBIF28_ANA_CON0 0x1e18 +#define MT6358_VAUD28_ANA_CON0 0x1e1c +#define MT6358_VAUX18_ANA_CON0 0x1e20 +#define MT6358_VXO22_ANA_CON0 0x1e24 #define MT6358_VCN33_ANA_CON0 0x1e28 #define MT6358_VSIM1_ANA_CON0 0x1e2c #define MT6358_VSIM2_ANA_CON0 0x1e30 @@ -288,4 +294,21 @@ #define MT6358_AUD_TOP_INT_CON0 0x2228 #define MT6358_AUD_TOP_INT_STATUS0 0x2234 +/* + * MT6366 has no VCAM*, but has other regulators in its place. The names + * keep the MT6358 prefix for ease of use in the regulator driver. 
+ */ +#define MT6358_LDO_VSRAM_CON5 0x1bf8 +#define MT6358_LDO_VM18_CON0 MT6358_LDO_VCAMA1_CON0 +#define MT6358_LDO_VM18_CON1 MT6358_LDO_VCAMA1_CON1 +#define MT6358_LDO_VM18_CON2 MT6358_LDO_VCAMA1_CON2 +#define MT6358_LDO_VMDDR_CON0 MT6358_LDO_VCAMA2_CON0 +#define MT6358_LDO_VMDDR_CON1 MT6358_LDO_VCAMA2_CON1 +#define MT6358_LDO_VMDDR_CON2 MT6358_LDO_VCAMA2_CON2 +#define MT6358_LDO_VSRAM_CORE_CON0 MT6358_LDO_VCAMD_CON0 +#define MT6358_LDO_VSRAM_CORE_DBG0 0x1cb6 +#define MT6358_LDO_VSRAM_CORE_DBG1 0x1cb8 +#define MT6358_VM18_ANA_CON0 MT6358_VCAMA1_ANA_CON0 +#define MT6358_VMDDR_ANA_CON0 MT6358_VCAMD_ANA_CON0 + #endif /* __MFD_MT6358_REGISTERS_H__ */ diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h index c5173bc06270..8421d49500bf 100644 --- a/include/linux/mfd/rz-mtu3.h +++ b/include/linux/mfd/rz-mtu3.h @@ -151,7 +151,6 @@ struct rz_mtu3 { void *priv_data; }; -#if IS_ENABLED(CONFIG_RZ_MTU3) static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch) { mutex_lock(&ch->lock); @@ -188,70 +187,5 @@ void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val); void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val); void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off, u16 pos, u8 val); -#else -static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch) -{ - return false; -} - -static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch) -{ -} - -static inline bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch) -{ - return false; -} - -static inline void rz_mtu3_disable(struct rz_mtu3_channel *ch) -{ -} - -static inline int rz_mtu3_enable(struct rz_mtu3_channel *ch) -{ - return 0; -} - -static inline u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off) -{ - return 0; -} - -static inline u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off) -{ - return 0; -} - -static inline u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off) -{ - return 0; -} - -static inline u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off) -{ - return 0; -} - -static inline void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val) -{ -} - -static inline void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val) -{ -} - -static inline void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val) -{ -} - -static inline void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val) -{ -} - -static inline void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, - u16 off, u16 pos, u8 val) -{ -} -#endif #endif /* __MFD_RZ_MTU3_H__ */ diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h index 18363b773d07..cb99e16ca947 100644 --- a/include/linux/mfd/si476x-platform.h +++ b/include/linux/mfd/si476x-platform.h @@ -10,7 +10,7 @@ #ifndef __SI476X_PLATFORM_H__ #define __SI476X_PLATFORM_H__ -/* It is possible to select one of the four adresses using pins A0 +/* It is possible to select one of the four addresses using pins A0 * and A1 on SI476x */ #define SI476X_I2C_ADDR_1 0x60 #define SI476X_I2C_ADDR_2 0x61 diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h index 1b94325febb3..ca35af30745f 100644 --- a/include/linux/mfd/stm32-timers.h +++ b/include/linux/mfd/stm32-timers.h @@ -102,6 +102,15 @@ enum stm32_timers_dmas { STM32_TIMERS_MAX_DMAS, }; +/* STM32 Timer may have either a unique global interrupt or 4 interrupt lines */ +enum stm32_timers_irqs { + 
STM32_TIMERS_IRQ_GLOBAL_BRK, /* global or brk IRQ */ + STM32_TIMERS_IRQ_UP, + STM32_TIMERS_IRQ_TRG_COM, + STM32_TIMERS_IRQ_CC, + STM32_TIMERS_MAX_IRQS, +}; + /** * struct stm32_timers_dma - STM32 timer DMA handling. * @completion: end of DMA transfer completion @@ -123,6 +132,8 @@ struct stm32_timers { struct regmap *regmap; u32 max_arr; struct stm32_timers_dma dma; /* Only to be used by the parent */ + unsigned int nr_irqs; + int irq[STM32_TIMERS_MAX_IRQS]; }; #if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS) diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 701925db75b3..f67ef0a4e041 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h @@ -749,7 +749,7 @@ #define VDDCTRL_ST_SHIFT 0 -/*Register VDDCTRL_OP (0x28) bit definitios */ +/*Register VDDCTRL_OP (0x28) bit definitions */ #define VDDCTRL_OP_CMD_MASK 0x80 #define VDDCTRL_OP_CMD_SHIFT 7 #define VDDCTRL_OP_SEL_MASK 0x7F diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 039943ec4d4e..d0f9b522f328 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -266,6 +266,7 @@ struct mhi_event_config { * struct mhi_controller_config - Root MHI controller configuration * @max_channels: Maximum number of channels supported * @timeout_ms: Timeout value for operations. 0 means use default + * @ready_timeout_ms: Timeout value for waiting for the device to be ready (optional) * @buf_len: Size of automatically allocated buffers. 0 means use default * @num_channels: Number of channels defined in @ch_cfg * @ch_cfg: Array of defined channels @@ -277,6 +278,7 @@ struct mhi_event_config { struct mhi_controller_config { u32 max_channels; u32 timeout_ms; + u32 ready_timeout_ms; u32 buf_len; u32 num_channels; const struct mhi_channel_config *ch_cfg; @@ -330,6 +332,7 @@ struct mhi_controller_config { * @pm_mutex: Mutex for suspend/resume operation * @pm_lock: Lock for protecting MHI power management state * @timeout_ms: Timeout in ms for state transitions + * @ready_timeout_ms: Timeout in ms for waiting for the device to be ready (optional) * @pm_state: MHI power management state * @db_access: DB access states * @ee: MHI device execution environment @@ -419,6 +422,7 @@ struct mhi_controller { struct mutex pm_mutex; rwlock_t pm_lock; u32 timeout_ms; + u32 ready_timeout_ms; u32 pm_state; u32 db_access; enum mhi_ee_type ee; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index f198a8ac7ee7..11bf3212f782 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -50,6 +50,27 @@ struct mhi_ep_db_info { }; /** + * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info + * @mhi_dev: MHI device associated with this buffer + * @dev_addr: Address of the buffer in the endpoint + * @host_addr: Address of the buffer in the host + * @size: Size of the buffer + * @code: Transfer completion code + * @cb: Callback to be executed by controller drivers after transfer completion (async) + * @cb_buf: Opaque buffer to be passed to the callback + */ +struct mhi_ep_buf_info { + struct mhi_ep_device *mhi_dev; + void *dev_addr; + u64 host_addr; + size_t size; + int code; + + void (*cb)(struct mhi_ep_buf_info *buf_info); + void *cb_buf; +}; + +/** * struct mhi_ep_cntrl - MHI Endpoint controller structure * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI * Endpoint controller @@ -82,8 +103,10 @@ struct mhi_ep_db_info { * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function 
to unmap and free the allocated memory in endpoint for storing host context - * @read_from_host: CB function for reading from host memory from endpoint - * @write_to_host: CB function for writing to host memory from endpoint + * @read_sync: CB function for reading from host memory synchronously + * @write_sync: CB function for writing to host memory synchronously + * @read_async: CB function for reading from host memory asynchronously + * @write_async: CB function for writing to host memory asynchronously * @mhi_state: MHI Endpoint state * @max_chan: Maximum channels supported by the endpoint controller * @mru: MRU (Maximum Receive Unit) value of the endpoint controller @@ -128,14 +151,19 @@ struct mhi_ep_cntrl { struct work_struct reset_work; struct work_struct cmd_ring_work; struct work_struct ch_ring_work; + struct kmem_cache *ring_item_cache; + struct kmem_cache *ev_ring_el_cache; + struct kmem_cache *tre_buf_cache; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, void __iomem **virt, size_t size); void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys, void __iomem *virt, size_t size); - int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size); - int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size); + int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info); + int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info); + int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info); + int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info); enum mhi_state mhi_state; diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 8bef1ab62bba..591bf5b5e8dc 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -41,9 +41,10 @@ #define PHY_ID_KSZ9477 0x00221631 /* struct phy_device dev_flags definitions */ -#define MICREL_PHY_50MHZ_CLK 0x00000001 -#define MICREL_PHY_FXEN 0x00000002 -#define MICREL_KSZ8_P1_ERRATA 0x00000003 +#define MICREL_PHY_50MHZ_CLK BIT(0) +#define MICREL_PHY_FXEN BIT(1) +#define MICREL_KSZ8_P1_ERRATA BIT(2) +#define MICREL_NO_EEE BIT(3) #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC @@ -63,6 +64,10 @@ #define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1) #define KSZ886X_BMCR_DISABLE_LED BIT(0) +/* PHY Special Control/Status Register (Reg 31) */ #define KSZ886X_CTRL_MDIX_STAT BIT(4) +#define KSZ886X_CTRL_FORCE_LINK BIT(3) +#define KSZ886X_CTRL_PWRSAVE BIT(2) +#define KSZ886X_CTRL_REMOTE_LOOPBACK BIT(1) #endif /* _MICREL_PHY_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 711dd9412561..2ce13e8a309b 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page) } #ifdef CONFIG_NUMA_BALANCING -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, +int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node); #else -static inline int migrate_misplaced_page(struct page *page, +static inline int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node) { return -EAGAIN; /* can't migrate now */ diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h index fa940bbaf8ae..26b04f73f214 100644 --- 
a/include/linux/mii_timestamper.h +++ b/include/linux/mii_timestamper.h @@ -9,6 +9,7 @@ #include <linux/device.h> #include <linux/ethtool.h> #include <linux/skbuff.h> +#include <linux/net_tstamp.h> struct phy_device; @@ -51,7 +52,8 @@ struct mii_timestamper { struct sk_buff *skb, int type); int (*hwtstamp)(struct mii_timestamper *mii_ts, - struct ifreq *ifreq); + struct kernel_hwtstamp_config *kernel_config, + struct netlink_ext_ack *extack); void (*link_state)(struct mii_timestamper *mii_ts, struct phy_device *phydev); diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 83aebc244cba..2ec559284a9f 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -2,60 +2,77 @@ #ifndef _LINUX_MINMAX_H #define _LINUX_MINMAX_H +#include <linux/build_bug.h> +#include <linux/compiler.h> #include <linux/const.h> #include <linux/types.h> /* * min()/max()/clamp() macros must accomplish three things: * - * - avoid multiple evaluations of the arguments (so side-effects like + * - Avoid multiple evaluations of the arguments (so side-effects like * "x++" happen only once) when non-constant. - * - perform strict type-checking (to generate warnings instead of - * nasty runtime surprises). See the "unnecessary" pointer comparison - * in __typecheck(). - * - retain result as a constant expressions when called with only + * - Retain result as a constant expression when called with only * constant expressions (to avoid tripping VLA warnings in stack * allocation usage). + * - Perform signed v unsigned type-checking (to generate compile + * errors instead of nasty runtime surprises). + * - Unsigned char/short are always promoted to signed int and can be + * compared against signed or unsigned arguments. + * - Unsigned arguments can be compared against non-negative signed constants. + * - Comparison of a signed argument against an unsigned constant fails + * even if the constant is below __INT_MAX__ and could be cast to int. */ #define __typecheck(x, y) \ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) -#define __no_side_effects(x, y) \ - (__is_constexpr(x) && __is_constexpr(y)) +/* is_signed_type() isn't a constexpr for pointer types */ +#define __is_signed(x) \ + __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \ + is_signed_type(typeof(x)), 0) -#define __safe_cmp(x, y) \ - (__typecheck(x, y) && __no_side_effects(x, y)) +/* True for a non-negative signed int constant */ +#define __is_noneg_int(x) \ + (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0) -#define __cmp(x, y, op) ((x) op (y) ? (x) : (y)) +#define __types_ok(x, y) \ + (__is_signed(x) == __is_signed(y) || \ + __is_signed((x) + 0) == __is_signed((y) + 0) || \ + __is_noneg_int(x) || __is_noneg_int(y)) -#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ - typeof(x) unique_x = (x); \ - typeof(y) unique_y = (y); \ - __cmp(unique_x, unique_y, op); }) +#define __cmp_op_min < +#define __cmp_op_max > -#define __careful_cmp(x, y, op) \ - __builtin_choose_expr(__safe_cmp(x, y), \ - __cmp(x, y, op), \ - __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) +#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? 
(x) : (y)) + +#define __cmp_once(op, x, y, unique_x, unique_y) ({ \ + typeof(x) unique_x = (x); \ + typeof(y) unique_y = (y); \ + static_assert(__types_ok(x, y), \ + #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \ + __cmp(op, unique_x, unique_y); }) + +#define __careful_cmp(op, x, y) \ + __builtin_choose_expr(__is_constexpr((x) - (y)), \ + __cmp(op, x, y), \ + __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y))) #define __clamp(val, lo, hi) \ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val))) -#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \ - typeof(val) unique_val = (val); \ - typeof(lo) unique_lo = (lo); \ - typeof(hi) unique_hi = (hi); \ - __clamp(unique_val, unique_lo, unique_hi); }) - -#define __clamp_input_check(lo, hi) \ - (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ - __is_constexpr((lo) > (hi)), (lo) > (hi), false))) +#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \ + typeof(val) unique_val = (val); \ + typeof(lo) unique_lo = (lo); \ + typeof(hi) unique_hi = (hi); \ + static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \ + (lo) <= (hi), true), \ + "clamp() low limit " #lo " greater than high limit " #hi); \ + static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \ + static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \ + __clamp(unique_val, unique_lo, unique_hi); }) #define __careful_clamp(val, lo, hi) ({ \ - __clamp_input_check(lo, hi) + \ - __builtin_choose_expr(__typecheck(val, lo) && __typecheck(val, hi) && \ - __typecheck(hi, lo) && __is_constexpr(val) && \ - __is_constexpr(lo) && __is_constexpr(hi), \ + __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \ __clamp(val, lo, hi), \ __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \ __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); }) @@ -65,14 +82,31 @@ * @x: first value * @y: second value */ -#define min(x, y) __careful_cmp(x, y, <) +#define min(x, y) __careful_cmp(min, x, y) /** * max - return maximum of two values of the same or compatible types * @x: first value * @y: second value */ -#define max(x, y) __careful_cmp(x, y, >) +#define max(x, y) __careful_cmp(max, x, y) + +/** + * umin - return minimum of two non-negative values + * Signed types are zero extended to match a larger unsigned type. + * @x: first value + * @y: second value + */ +#define umin(x, y) \ + __careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull) + +/** + * umax - return maximum of two non-negative values + * @x: first value + * @y: second value + */ +#define umax(x, y) \ + __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull) /** * min3 - return minimum of three values @@ -124,7 +158,7 @@ * @x: first value * @y: second value */ -#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) +#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y)) /** * max_t - return maximum of two values, using the specified type @@ -132,28 +166,7 @@ * @x: first value * @y: second value */ -#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) - -/* - * Remove a const qualifier from integer types - * _Generic(foo, type-name: association, ..., default: association) performs a - * comparison against the foo type (not the qualified type). - * Do not use the const keyword in the type-name as it will not match the - * unqualified type of foo. 
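Worth illustrating at this point in the minmax rework: a signed/unsigned min() mix now becomes a hard compile error via the __types_ok() static_assert, and the new umin()/umax() are the intended escape hatch for values known to be non-negative. A sketch, with illustrative function and variable names:

	#include <linux/minmax.h>

	static size_t bounded_len(int requested, size_t available)
	{
		/*
		 * min(requested, available) would now trip the signedness
		 * static_assert (signed int v unsigned size_t). umin()
		 * widens both sides to a larger unsigned type; this is
		 * only safe because requested is known >= 0 here.
		 */
		return umin(requested, available);
	}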
- */ -#define __unconst_integer_type_cases(type) \ - unsigned type: (unsigned type)0, \ - signed type: (signed type)0 - -#define __unconst_integer_typeof(x) typeof( \ - _Generic((x), \ - char: (char)0, \ - __unconst_integer_type_cases(char), \ - __unconst_integer_type_cases(short), \ - __unconst_integer_type_cases(int), \ - __unconst_integer_type_cases(long), \ - __unconst_integer_type_cases(long long), \ - default: (x))) +#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y)) /* * Do not check the array parameter using __must_be_array(). @@ -169,13 +182,13 @@ * 'int *buff' and 'int buff[N]' types. * * The array can be an array of const items. - * typeof() keeps the const qualifier. Use __unconst_integer_typeof() in order + * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order * to discard the const qualifier for the __element variable. */ #define __minmax_array(op, array, len) ({ \ typeof(&(array)[0]) __array = (array); \ typeof(len) __len = (len); \ - __unconst_integer_typeof(__array[0]) __element = __array[--__len]; \ + __unqual_scalar_typeof(__array[0]) __element = __array[--__len];\ while (__len--) \ __element = op(__element, __array[__len]); \ __element; }) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 4d5be378fa8c..01275c6e8468 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -366,6 +366,9 @@ enum mlx5_driver_event { MLX5_DRIVER_EVENT_UPLINK_NETDEV, MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, + MLX5_DRIVER_EVENT_SF_PEER_DEVLINK, + MLX5_DRIVER_EVENT_AFFILIATION_DONE, + MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, }; enum { @@ -915,7 +918,7 @@ static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe) return (cqe->tls_outer_l3_tunneled >> 3) & 0x3; } -static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) +static inline bool cqe_has_vlan(const struct mlx5_cqe64 *cqe) { return cqe->l4_l3_hdr_type & 0x1; } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 3033bbaeac81..41f03b352401 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -150,11 +150,14 @@ enum { MLX5_REG_MTPPSE = 0x9054, MLX5_REG_MTUTC = 0x9055, MLX5_REG_MPEGC = 0x9056, + MLX5_REG_MPIR = 0x9059, MLX5_REG_MCQS = 0x9060, MLX5_REG_MCQI = 0x9061, MLX5_REG_MCC = 0x9062, MLX5_REG_MCDA = 0x9063, MLX5_REG_MCAM = 0x907f, + MLX5_REG_MSECQ = 0x9155, + MLX5_REG_MSEES = 0x9156, MLX5_REG_MIRC = 0x9162, MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, @@ -613,6 +616,7 @@ struct mlx5_priv { int adev_idx; int sw_vhca_id; struct mlx5_events *events; + struct mlx5_vhca_events *vhca_events; struct mlx5_flow_steering *steering; struct mlx5_mpfs *mpfs; @@ -621,6 +625,7 @@ struct mlx5_priv { struct mlx5_lag *lag; u32 flags; struct mlx5_devcom_dev *devc; + struct mlx5_devcom_comp_dev *hca_devcom_comp; struct mlx5_fw_reset *fw_reset; struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; @@ -674,6 +679,9 @@ struct mlx5e_resources { struct mlx5_td td; u32 mkey; struct mlx5_sq_bfreg bfreg; +#define MLX5_MAX_NUM_TC 8 + u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]; + bool tisn_valid; } hw_objs; struct net_device *uplink_netdev; struct mutex uplink_netdev_lock; @@ -684,6 +692,7 @@ enum mlx5_sw_icm_type { MLX5_SW_ICM_TYPE_STEERING, MLX5_SW_ICM_TYPE_HEADER_MODIFY, MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN, + MLX5_SW_ICM_TYPE_SW_ENCAP, }; #define MLX5_MAX_RESERVED_GIDS 8 @@ -1027,6 +1036,8 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); void mlx5_core_uplink_netdev_set(struct 
mlx5_core_dev *mdev, struct net_device *netdev); void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev); +void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data); + void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); @@ -1037,10 +1048,6 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev); int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); -struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, - gfp_t flags, int npages); -void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, - struct mlx5_cmd_mailbox *head); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, int inlen); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey); @@ -1054,8 +1061,6 @@ void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev); void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev); -void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s32 npages, bool ec_function); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); @@ -1095,8 +1100,6 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); __be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); -int mlx5_query_odp_caps(struct mlx5_core_dev *dev, - struct mlx5_odp_caps *odp_caps); int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); @@ -1198,12 +1201,6 @@ int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev, void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev, int vf_id, struct notifier_block *nb); -#ifdef CONFIG_MLX5_CORE_IPOIB -struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, - struct ib_device *ibdev, - const char *name, - void (*setup)(struct net_device *)); -#endif /* CONFIG_MLX5_CORE_IPOIB */ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, struct ib_device *device, struct rdma_netdev_alloc_params *params); diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 950d2431a53c..df73a2ccc9af 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -7,6 +7,7 @@ #define _MLX5_ESWITCH_ #include <linux/mlx5/driver.h> +#include <linux/mlx5/vport.h> #include <net/devlink.h> #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) @@ -210,4 +211,11 @@ static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS; } +/* The returned number is valid only when the dev is eswitch manager. */ +static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) +{ + return mlx5_core_is_ecpf_esw_manager(dev) ? 
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF; +} + #endif diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 1e00c2436377..3fb428ce7d1c 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -67,6 +67,7 @@ enum { MLX5_FLOW_TABLE_TERMINATION = BIT(2), MLX5_FLOW_TABLE_UNMANAGED = BIT(3), MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4), + MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5), }; #define LEFTOVERS_RULE_NUM 2 @@ -131,6 +132,7 @@ struct mlx5_flow_handle; enum { FLOW_CONTEXT_HAS_TAG = BIT(0), + FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1), }; struct mlx5_flow_context { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index fc3db401f8a2..c726f90ab752 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -312,6 +312,7 @@ enum { MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d, MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e, MLX5_CMD_OP_SYNC_CRYPTO = 0xb12, + MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16, MLX5_CMD_OP_MAX }; @@ -434,7 +435,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 flow_table_modify[0x1]; u8 reformat[0x1]; u8 decap[0x1]; - u8 reserved_at_9[0x1]; + u8 reset_root_to_default[0x1]; u8 pop_vlan[0x1]; u8 push_vlan[0x1]; u8 reserved_at_c[0x1]; @@ -620,7 +621,7 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_140[0x8]; u8 bth_dst_qp[0x18]; - u8 reserved_at_160[0x20]; + u8 inner_esp_spi[0x20]; u8 outer_esp_spi[0x20]; u8 reserved_at_1a0[0x60]; }; @@ -1192,7 +1193,8 @@ struct mlx5_ifc_device_mem_cap_bits { u8 log_sw_icm_alloc_granularity[0x6]; u8 log_steering_sw_icm_size[0x8]; - u8 reserved_at_120[0x18]; + u8 log_indirect_encap_sw_icm_size[0x8]; + u8 reserved_at_128[0x10]; u8 log_header_modify_pattern_sw_icm_size[0x8]; u8 header_modify_sw_icm_start_address[0x40]; @@ -1203,7 +1205,11 @@ struct mlx5_ifc_device_mem_cap_bits { u8 memic_operations[0x20]; - u8 reserved_at_220[0x5e0]; + u8 reserved_at_220[0x20]; + + u8 indirect_encap_sw_icm_start_address[0x40]; + + u8 reserved_at_280[0x580]; }; struct mlx5_ifc_device_event_cap_bits { @@ -1231,7 +1237,14 @@ struct mlx5_ifc_virtio_emulation_cap_bits { u8 max_emulated_devices[0x8]; u8 max_num_virtio_queues[0x18]; - u8 reserved_at_a0[0x60]; + u8 reserved_at_a0[0x20]; + + u8 reserved_at_c0[0x13]; + u8 desc_group_mkey_supported[0x1]; + u8 freeze_to_rdy_supported[0x1]; + u8 reserved_at_d5[0xb]; + + u8 reserved_at_e0[0x20]; u8 umem_1_buffer_param_a[0x20]; @@ -1794,7 +1807,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 disable_local_lb_uc[0x1]; u8 disable_local_lb_mc[0x1]; u8 log_min_hairpin_wq_data_sz[0x5]; - u8 reserved_at_3e8[0x2]; + u8 reserved_at_3e8[0x1]; + u8 silent_mode[0x1]; u8 vhca_state[0x1]; u8 log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; @@ -1811,7 +1825,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_460[0x1]; u8 ats[0x1]; - u8 reserved_at_462[0x1]; + u8 cross_vhca_rqt[0x1]; u8 log_max_uctx[0x5]; u8 reserved_at_468[0x1]; u8 crypto[0x1]; @@ -1934,6 +1948,15 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 match_definer_format_supported[0x40]; }; +enum { + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000, + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE = (1ULL << 20), +}; + +enum { + MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200, +}; + struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_0[0x80]; @@ -1948,9 +1971,15 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_c0[0x8]; u8 migration_multi_load[0x1]; u8 migration_tracking_state[0x1]; - u8 reserved_at_ca[0x16]; + u8 reserved_at_ca[0x6]; + u8 
migration_in_chunks[0x1]; + u8 reserved_at_d1[0xf]; + + u8 cross_vhca_object_to_object_supported[0x20]; - u8 reserved_at_e0[0xc0]; + u8 allowed_object_for_other_vhca_access[0x40]; + + u8 reserved_at_140[0x60]; u8 flow_table_type_2_type[0x8]; u8 reserved_at_1a8[0x3]; @@ -1971,7 +2000,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_260[0x120]; u8 reserved_at_380[0x10]; u8 ec_vf_vport_base[0x10]; - u8 reserved_at_3a0[0x460]; + + u8 reserved_at_3a0[0x10]; + u8 max_rqt_vhca_id[0x10]; + + u8 reserved_at_3c0[0x440]; }; enum mlx5_ifc_flow_destination_type { @@ -2130,6 +2163,13 @@ struct mlx5_ifc_rq_num_bits { u8 rq_num[0x18]; }; +struct mlx5_ifc_rq_vhca_bits { + u8 reserved_at_0[0x8]; + u8 rq_num[0x18]; + u8 reserved_at_20[0x10]; + u8 rq_vhca_id[0x10]; +}; + struct mlx5_ifc_mac_address_layout_bits { u8 reserved_at_0[0x10]; u8 mac_addr_47_32[0x10]; @@ -3536,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits { u8 action[0x10]; u8 extended_destination[0x1]; - u8 reserved_at_81[0x1]; + u8 uplink_hairpin_en[0x1]; u8 flow_source[0x2]; u8 encrypt_decrypt_type[0x4]; u8 destination_list_size[0x18]; @@ -3880,7 +3920,10 @@ struct mlx5_ifc_rqtc_bits { u8 reserved_at_e0[0x6a0]; - struct mlx5_ifc_rq_num_bits rq_num[]; + union { + DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_num_bits, rq_num); + DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_vhca_bits, rq_vhca); + }; }; enum { @@ -3993,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits { u8 affiliation_criteria[0x4]; u8 affiliated_vhca_id[0x10]; - u8 reserved_at_60[0xd0]; + u8 reserved_at_60[0xa0]; + u8 reserved_at_100[0x1]; + u8 sd_group[0x3]; + u8 reserved_at_104[0x1c]; + + u8 reserved_at_120[0x10]; u8 mtu[0x10]; u8 system_image_guid[0x40]; @@ -4723,7 +4771,10 @@ struct mlx5_ifc_set_l2_table_entry_in_bits { u8 reserved_at_c0[0x20]; - u8 reserved_at_e0[0x13]; + u8 reserved_at_e0[0x10]; + u8 silent_mode_valid[0x1]; + u8 silent_mode[0x1]; + u8 reserved_at_f2[0x1]; u8 vlan_valid[0x1]; u8 vlan[0xc]; @@ -6369,6 +6420,28 @@ struct mlx5_ifc_general_obj_out_cmd_hdr_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_allow_other_vhca_access_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + u8 reserved_at_40[0x50]; + u8 object_type_to_be_accessed[0x10]; + u8 object_id_to_be_accessed[0x20]; + u8 reserved_at_c0[0x40]; + union { + u8 access_key_raw[0x100]; + u8 access_key[8][0x20]; + }; +}; + +struct mlx5_ifc_allow_other_vhca_access_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + u8 syndrome[0x20]; + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_modify_header_arg_bits { u8 reserved_at_0[0x80]; @@ -6391,6 +6464,24 @@ struct mlx5_ifc_create_match_definer_out_bits { struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; }; +struct mlx5_ifc_alias_context_bits { + u8 vhca_id_to_be_accessed[0x10]; + u8 reserved_at_10[0xd]; + u8 status[0x3]; + u8 object_id_to_be_accessed[0x20]; + u8 reserved_at_40[0x40]; + union { + u8 access_key_raw[0x100]; + u8 access_key[8][0x20]; + }; + u8 metadata[0x80]; +}; + +struct mlx5_ifc_create_alias_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_alias_context_bits alias_ctx; +}; + enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@ -10028,6 +10119,19 @@ struct mlx5_ifc_mpegc_reg_bits { u8 reserved_at_60[0x100]; }; +struct mlx5_ifc_mpir_reg_bits { + u8 sdm[0x1]; + u8 reserved_at_1[0x1b]; + u8 host_buses[0x4]; + + u8 reserved_at_20[0x20]; + + u8 local_port[0x8]; + 
u8 reserved_at_28[0x18]; + + u8 reserved_at_60[0x20]; +}; + enum { MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0, MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1, @@ -10042,7 +10146,10 @@ enum { struct mlx5_ifc_mtutc_reg_bits { u8 reserved_at_0[0x5]; u8 freq_adj_units[0x3]; - u8 reserved_at_8[0x14]; + u8 reserved_at_8[0x3]; + u8 log_max_freq_adjustment[0x5]; + + u8 reserved_at_10[0xc]; u8 operation[0x4]; u8 freq_adjustment[0x20]; @@ -10176,7 +10283,9 @@ struct mlx5_ifc_mcam_access_reg_bits2 { u8 mirc[0x1]; u8 regs_97_to_96[0x2]; - u8 regs_95_to_64[0x20]; + u8 regs_95_to_87[0x09]; + u8 synce_registers[0x2]; + u8 regs_84_to_64[0x15]; u8 regs_63_to_32[0x20]; @@ -10572,6 +10681,7 @@ enum { MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe, MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf, MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12, }; struct mlx5_ifc_initial_seg_bits { @@ -11917,6 +12027,7 @@ enum { MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24, MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27, MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15, }; enum { @@ -11936,6 +12047,13 @@ enum { MLX5_IPSEC_ASO_INC_SN = 0x2, }; +enum { + MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0, + MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1, + MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2, + MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3, +}; + struct mlx5_ifc_ipsec_aso_bits { u8 valid[0x1]; u8 reserved_at_201[0x1]; @@ -12392,7 +12510,8 @@ struct mlx5_ifc_query_vhca_migration_state_in_bits { u8 op_mod[0x10]; u8 incremental[0x1]; - u8 reserved_at_41[0xf]; + u8 chunk[0x1]; + u8 reserved_at_42[0xe]; u8 vhca_id[0x10]; u8 reserved_at_60[0x20]; @@ -12408,7 +12527,11 @@ struct mlx5_ifc_query_vhca_migration_state_out_bits { u8 required_umem_size[0x20]; - u8 reserved_at_a0[0x160]; + u8 reserved_at_a0[0x20]; + + u8 remaining_total_size[0x40]; + + u8 reserved_at_100[0x100]; }; struct mlx5_ifc_save_vhca_state_in_bits { @@ -12440,7 +12563,7 @@ struct mlx5_ifc_save_vhca_state_out_bits { u8 actual_image_size[0x20]; - u8 reserved_at_60[0x20]; + u8 next_required_umem_size[0x20]; }; struct mlx5_ifc_load_vhca_state_in_bits { @@ -12549,4 +12672,59 @@ struct mlx5_ifc_modify_page_track_obj_in_bits { struct mlx5_ifc_page_track_bits obj_context; }; +struct mlx5_ifc_msecq_reg_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x12]; + u8 network_option[0x2]; + u8 local_ssm_code[0x4]; + u8 local_enhanced_ssm_code[0x8]; + + u8 local_clock_identity[0x40]; + + u8 reserved_at_80[0x180]; +}; + +enum { + MLX5_MSEES_FIELD_SELECT_ENABLE = BIT(0), + MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS = BIT(1), + MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE = BIT(2), +}; + +enum mlx5_msees_admin_status { + MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING = 0x0, + MLX5_MSEES_ADMIN_STATUS_TRACK = 0x1, +}; + +enum mlx5_msees_oper_status { + MLX5_MSEES_OPER_STATUS_FREE_RUNNING = 0x0, + MLX5_MSEES_OPER_STATUS_SELF_TRACK = 0x1, + MLX5_MSEES_OPER_STATUS_OTHER_TRACK = 0x2, + MLX5_MSEES_OPER_STATUS_HOLDOVER = 0x3, + MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER = 0x4, + MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5, +}; + +struct mlx5_ifc_msees_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 lp_msb[0x2]; + u8 reserved_at_14[0xc]; + + u8 field_select[0x20]; + + u8 admin_status[0x4]; + u8 oper_status[0x4]; + u8 ho_acq[0x1]; + u8 reserved_at_49[0xc]; + u8 admin_freq_measure[0x1]; + u8 oper_freq_measure[0x1]; + u8 failure_reason[0x9]; + + u8 frequency_diff[0x20]; + + u8 reserved_at_80[0x180]; +}; + 
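
The mlx5_ifc_*_bits structs above only describe bit-granular register layouts; drivers never touch them directly but go through the MLX5_SET()/MLX5_GET() accessors and then issue an access-register command. A minimal sketch of programming the new MSEES admin status follows; it uses the enums and layout added above together with the existing mlx5_core_access_reg() helper, and the MLX5_REG_MSEES register id is an assumption made here for illustration.

	/* Illustrative sketch, not part of the patch. */
	static int demo_msees_track(struct mlx5_core_dev *mdev, u8 local_port)
	{
		u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
		u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};

		MLX5_SET(msees_reg, in, local_port, local_port);
		MLX5_SET(msees_reg, in, field_select,
			 MLX5_MSEES_FIELD_SELECT_ENABLE |
			 MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
		MLX5_SET(msees_reg, in, admin_status, MLX5_MSEES_ADMIN_STATUS_TRACK);

		/* MLX5_REG_MSEES is assumed here; the final argument selects a write. */
		return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
					    MLX5_REG_MSEES, 0, 1);
	}
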
#endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 9becdc3fa503..40371c916cf9 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -74,7 +74,11 @@ struct mlx5_ifc_virtio_q_bits { u8 reserved_at_320[0x8]; u8 pd[0x18]; - u8 reserved_at_340[0xc0]; + u8 reserved_at_340[0x20]; + + u8 desc_group_mkey[0x20]; + + u8 reserved_at_380[0x80]; }; struct mlx5_ifc_virtio_net_q_object_bits { @@ -141,6 +145,11 @@ enum { MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0, MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3, MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4, + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6, + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7, + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8, + MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11, + MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14, }; enum { diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 98b2e1e149f9..26092c78a985 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -115,8 +115,9 @@ enum mlx5e_ext_link_mode { MLX5E_100GAUI_1_100GBASE_CR_KR = 11, MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13, - MLX5E_400GAUI_8 = 15, + MLX5E_400GAUI_8_400GBASE_CR8 = 15, MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16, + MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19, MLX5E_EXT_LINK_MODES_NUMBER, }; diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index fbb9bf447889..c36cc6d82926 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, u64 *system_image_guid); +int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group); int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, u16 vport, u64 node_guid); diff --git a/include/linux/mm.h b/include/linux/mm.h index bf5d0b1b16f4..f5a97dec5169 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -362,8 +362,6 @@ extern unsigned int kobjsize(const void *objp); # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 -#elif defined(CONFIG_IA64) -# define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_SPARC64) # define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ # define VM_ARCH_CLEAR VM_SPARC_ADI @@ -619,7 +617,7 @@ struct vm_operations_struct { * policy. */ struct mempolicy *(*get_policy)(struct vm_area_struct *vma, - unsigned long addr); + unsigned long addr, pgoff_t *ilx); #endif /* * Called by vm_normal_page() for special PTEs to find the @@ -888,8 +886,8 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma) */ static inline bool vma_is_initial_heap(const struct vm_area_struct *vma) { - return vma->vm_start <= vma->vm_mm->brk && - vma->vm_end >= vma->vm_mm->start_brk; + return vma->vm_start < vma->vm_mm->brk && + vma->vm_end > vma->vm_mm->start_brk; } /* @@ -903,8 +901,8 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma) * its "stack". It's not even well-defined for programs written in * languages like Go.
*/ - return vma->vm_start <= vma->vm_mm->start_stack && - vma->vm_end >= vma->vm_mm->start_stack; + return vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack; } static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) @@ -937,6 +935,17 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma) return vma->vm_flags & VM_ACCESS_FLAGS; } +static inline bool is_shared_maywrite(vm_flags_t vm_flags) +{ + return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == + (VM_SHARED | VM_MAYWRITE); +} + +static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma) +{ + return is_shared_maywrite(vma->vm_flags); +} + static inline struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) { @@ -985,6 +994,17 @@ static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi, return mas_expected_entries(&vmi->mas, count); } +static inline int vma_iter_clear_gfp(struct vma_iterator *vmi, + unsigned long start, unsigned long end, gfp_t gfp) +{ + __mas_set_range(&vmi->mas, start, end - 1); + mas_store_gfp(&vmi->mas, NULL, gfp); + if (unlikely(mas_is_err(&vmi->mas))) + return -ENOMEM; + + return 0; +} + /* Free any unused preallocations */ static inline void vma_iter_free(struct vma_iterator *vmi) { @@ -1337,7 +1357,6 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, struct page *page, unsigned int nr, unsigned long addr); vm_fault_t finish_fault(struct vm_fault *vmf); -vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #endif /* @@ -1686,26 +1705,26 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS -static inline int page_cpupid_xchg_last(struct page *page, int cpupid) +static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { - return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); + return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK); } -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return page->_last_cpupid; + return folio->_last_cpupid; } static inline void page_cpupid_reset_last(struct page *page) { page->_last_cpupid = -1 & LAST_CPUPID_MASK; } #else -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; + return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; } -extern int page_cpupid_xchg_last(struct page *page, int cpupid); +int folio_xchg_last_cpupid(struct folio *folio, int cpupid); static inline void page_cpupid_reset_last(struct page *page) { @@ -1713,11 +1732,12 @@ static inline void page_cpupid_reset_last(struct page *page) } #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ -static inline int xchg_page_access_time(struct page *page, int time) +static inline int folio_xchg_access_time(struct folio *folio, int time) { int last_time; - last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS); + last_time = folio_xchg_last_cpupid(folio, + time >> PAGE_ACCESS_TIME_BUCKETS); return last_time << PAGE_ACCESS_TIME_BUCKETS; } @@ -1726,24 +1746,24 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) unsigned int pid_bit; pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG)); - if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->access_pids[1])) { - __set_bit(pid_bit, &vma->numab_state->access_pids[1]); + if 
(vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) { + __set_bit(pid_bit, &vma->numab_state->pids_active[1]); } } #else /* !CONFIG_NUMA_BALANCING */ -static inline int page_cpupid_xchg_last(struct page *page, int cpupid) +static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { - return page_to_nid(page); /* XXX */ + return folio_nid(folio); /* XXX */ } -static inline int xchg_page_access_time(struct page *page, int time) +static inline int folio_xchg_access_time(struct folio *folio, int time) { return 0; } -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return page_to_nid(page); /* XXX */ + return folio_nid(folio); /* XXX */ } static inline int cpupid_to_nid(int cpupid) @@ -1795,7 +1815,7 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) static inline u8 page_kasan_tag(const struct page *page) { - u8 tag = 0xff; + u8 tag = KASAN_TAG_KERNEL; if (kasan_enabled()) { tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; @@ -1824,7 +1844,7 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag) static inline void page_kasan_tag_reset(struct page *page) { if (kasan_enabled()) - page_kasan_tag_set(page, 0xff); + page_kasan_tag_set(page, KASAN_TAG_KERNEL); } #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ @@ -1944,15 +1964,15 @@ static inline bool page_maybe_dma_pinned(struct page *page) * * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq. */ -static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, - struct page *page) +static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma, + struct folio *folio) { VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1)); if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) return false; - return page_maybe_dma_pinned(page); + return folio_maybe_dma_pinned(folio); } /** @@ -2327,6 +2347,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, pte_t pte); struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t pmd); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); @@ -2362,7 +2384,8 @@ extern void truncate_pagecache(struct inode *inode, loff_t new); extern void truncate_setsize(struct inode *inode, loff_t newsize); void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); -int generic_error_remove_page(struct address_space *mapping, struct page *page); +int generic_error_remove_folio(struct address_space *mapping, + struct folio *folio); struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, unsigned long address, struct pt_regs *regs); @@ -2413,8 +2436,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); -extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, - void *buf, int len, unsigned int gup_flags); long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, @@ -2425,6 +2446,9 @@ long pin_user_pages_remote(struct mm_struct *mm, unsigned int gup_flags, struct page **pages, int *locked); +/* + * Retrieves a 
single page alongside its VMA. Does not support FOLL_NOWAIT. + */ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, unsigned long addr, int gup_flags, @@ -2432,12 +2456,15 @@ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, { struct page *page; struct vm_area_struct *vma; - int got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL); + int got; + + if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT))) + return ERR_PTR(-EINVAL); + + got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL); if (got < 0) return ERR_PTR(got); - if (got == 0) - return NULL; vma = vma_lookup(mm, addr); if (WARN_ON_ONCE(!vma)) { @@ -2480,7 +2507,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, - bool need_rmap_locks); + bool need_rmap_locks, bool for_stack); /* * Flags used by change_protection(). For now we make it a bitmap so @@ -2628,14 +2655,6 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, *maxrss = hiwater_rss; } -#if defined(SPLIT_RSS_COUNTING) -void sync_mm_rss(struct mm_struct *mm); -#else -static inline void sync_mm_rss(struct mm_struct *mm) -{ -} -#endif - #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL static inline int pte_special(pte_t pte) { @@ -3057,6 +3076,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) return ptl; } +static inline void pagetable_pud_ctor(struct ptdesc *ptdesc) +{ + struct folio *folio = ptdesc_folio(ptdesc); + + __folio_set_pgtable(folio); + lruvec_stat_add_folio(folio, NR_PAGETABLE); +} + +static inline void pagetable_pud_dtor(struct ptdesc *ptdesc) +{ + struct folio *folio = ptdesc_folio(ptdesc); + + __folio_clear_pgtable(folio); + lruvec_stat_sub_folio(folio, NR_PAGETABLE); +} + extern void __init pagecache_init(void); extern void free_initmem(void); @@ -3221,22 +3256,73 @@ extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct *next); extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff); -extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi, - struct mm_struct *, struct vm_area_struct *prev, unsigned long addr, - unsigned long end, unsigned long vm_flags, struct anon_vma *, - struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx, - struct anon_vma_name *); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); -extern int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *, - unsigned long addr, int new_below); -extern int split_vma(struct vma_iterator *vmi, struct vm_area_struct *, - unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void unlink_file_vma(struct vm_area_struct *); extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); +struct vm_area_struct *vma_modify(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long vm_flags, + struct mempolicy *policy, + struct vm_userfaultfd_ctx uffd_ctx, + struct anon_vma_name *anon_name); + +/* We are about to modify the VMA's flags. 
*/ +static inline struct vm_area_struct +*vma_modify_flags(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long new_flags) +{ + return vma_modify(vmi, prev, vma, start, end, new_flags, + vma_policy(vma), vma->vm_userfaultfd_ctx, + anon_vma_name(vma)); +} + +/* We are about to modify the VMA's flags and/or anon_name. */ +static inline struct vm_area_struct +*vma_modify_flags_name(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + unsigned long new_flags, + struct anon_vma_name *new_name) +{ + return vma_modify(vmi, prev, vma, start, end, new_flags, + vma_policy(vma), vma->vm_userfaultfd_ctx, new_name); +} + +/* We are about to modify the VMA's memory policy. */ +static inline struct vm_area_struct +*vma_modify_policy(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct mempolicy *new_pol) +{ + return vma_modify(vmi, prev, vma, start, end, vma->vm_flags, + new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma)); +} + +/* We are about to modify the VMA's flags and/or uffd context. */ +static inline struct vm_area_struct +*vma_modify_flags_uffd(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long new_flags, + struct vm_userfaultfd_ctx new_ctx) +{ + return vma_modify(vmi, prev, vma, start, end, new_flags, + vma_policy(vma), new_ctx, anon_vma_name(vma)); +} static inline int check_data_rlimit(unsigned long rlim, unsigned long new, @@ -3308,8 +3394,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len) static inline void mm_populate(unsigned long addr, unsigned long len) {} #endif -/* These take the mm semaphore themselves */ -extern int __must_check vm_brk(unsigned long, unsigned long); +/* This takes the mm semaphore itself */ extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); extern int vm_munmap(unsigned long, size_t); extern unsigned long __must_check vm_mmap(struct file *, unsigned long, @@ -3786,6 +3871,32 @@ void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap); #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP +static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) +{ + /* number of pfns from base where pfn_to_page() is valid */ + if (altmap) + return altmap->reserve + altmap->free; + return 0; +} + +static inline void vmem_altmap_free(struct vmem_altmap *altmap, + unsigned long nr_pfns) +{ + altmap->alloc -= nr_pfns; +} +#else +static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) +{ + return 0; +} + +static inline void vmem_altmap_free(struct vmem_altmap *altmap, + unsigned long nr_pfns) +{ +} +#endif + #define VMEMMAP_RESERVE_NR 2 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap, @@ -3831,6 +3942,7 @@ enum mf_flags { MF_UNPOISON = 1 << 4, MF_SW_SIMULATED = 1 << 5, MF_NO_RETRY = 1 << 6, + MF_MEM_PRE_REMOVE = 1 << 7, }; int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, unsigned long count, int mf_flags); @@ -4000,25 +4112,26 @@ static inline void mem_dump_obj(void *object) {} #endif /** - * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it + * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and + * 
handle them. * @seals: the seals to check * @vma: the vma to operate on * - * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on - * the vma flags. Return 0 if check pass, or <0 for errors. + * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper + * check/handling on the vma flags. Return 0 if the check passes, or <0 for errors. */ -static inline int seal_check_future_write(int seals, struct vm_area_struct *vma) +static inline int seal_check_write(int seals, struct vm_area_struct *vma) { - if (seals & F_SEAL_FUTURE_WRITE) { + if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { /* * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * "future write" seal active. + * write seals are active. */ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) return -EPERM; /* - * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as + * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as * MAP_SHARED and read-only, take care to not allow mprotect to * revert protections on such mappings. Do this only for shared * mappings. For private mappings, don't need to mask @@ -4062,4 +4175,11 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end) #endif +static inline bool pfn_is_unaccepted_memory(unsigned long pfn) +{ + phys_addr_t paddr = pfn << PAGE_SHIFT; + + return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE); +} + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 8148b30a9df1..f4fe593c1400 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -4,6 +4,7 @@ #include <linux/atomic.h> #include <linux/huge_mm.h> +#include <linux/mm_types.h> #include <linux/swap.h> #include <linux/string.h> #include <linux/userfaultfd_k.h> @@ -231,22 +232,27 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, if (folio_test_unevictable(folio) || !lrugen->enabled) return false; /* - * There are three common cases for this page: - * 1. If it's hot, e.g., freshly faulted in or previously hot and - * migrated, add it to the youngest generation. - * 2. If it's cold but can't be evicted immediately, i.e., an anon page - * not in swapcache or a dirty page pending writeback, add it to the - * second oldest generation. - * 3. Everything else (clean, cold) is added to the oldest generation. + * There are four common cases for this page: + * 1. If it's hot, i.e., freshly faulted in, add it to the youngest + * generation, and it's protected over the rest below. + * 2. If it can't be evicted immediately, i.e., a dirty page pending + * writeback, add it to the second youngest generation. + * 3. If it should be evicted first, e.g., cold and clean from + * folio_rotate_reclaimable(), add it to the oldest generation. + * 4. Everything else falls between 2 & 3 above and is added to the + * second oldest generation if it's considered inactive, or the + * oldest generation otherwise. See lru_gen_is_active(). 
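+ * + * In the selection below, case 1 is the folio_test_active() branch + * (seq = max_seq); case 2 is the swapcache-less anon / dirty-writeback + * branch (seq = max_seq - 1); case 3 is the reclaiming branch + * (seq = min_seq); and case 4 is the final branch (seq = min_seq + 1).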
*/ if (folio_test_active(folio)) seq = lrugen->max_seq; else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) || (folio_test_reclaim(folio) && (folio_test_dirty(folio) || folio_test_writeback(folio)))) - seq = lrugen->min_seq[type] + 1; - else + seq = lrugen->max_seq - 1; + else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq) seq = lrugen->min_seq[type]; + else + seq = lrugen->min_seq[type] + 1; gen = lru_gen_from_seq(seq); flags = (gen + 1UL) << LRU_GEN_PGOFF; @@ -352,15 +358,6 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio) } #ifdef CONFIG_ANON_VMA_NAME -/* - * mmap_lock should be read-locked when calling anon_vma_name(). Caller should - * either keep holding the lock while using the returned pointer or it should - * raise anon_vma_name refcount before releasing the lock. - */ -extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma); -extern struct anon_vma_name *anon_vma_name_alloc(const char *name); -extern void anon_vma_name_free(struct kref *kref); - /* mmap_lock should be read-locked */ static inline void anon_vma_name_get(struct anon_vma_name *anon_name) { @@ -415,16 +412,6 @@ static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1, } #else /* CONFIG_ANON_VMA_NAME */ -static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) -{ - return NULL; -} - -static inline struct anon_vma_name *anon_vma_name_alloc(const char *name) -{ - return NULL; -} - static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {} static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {} static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 36c5b43999e6..8b611e13153e 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -125,18 +125,7 @@ struct page { struct page_pool *pp; unsigned long _pp_mapping_pad; unsigned long dma_addr; - union { - /** - * dma_addr_upper: might require a 64-bit - * value on 32-bit architectures. - */ - unsigned long dma_addr_upper; - /** - * For frag page support, not supported in - * 32-bit architectures with 64-bit DMA. - */ - atomic_long_t pp_frag_count; - }; + atomic_long_t pp_ref_count; }; struct { /* Tail pages of compound page */ unsigned long compound_head; /* Bit zero is set */ @@ -199,6 +188,10 @@ struct page { not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; +#endif + #ifdef CONFIG_KMSAN /* * KMSAN metadata for this page: @@ -210,10 +203,6 @@ struct page { struct page *kmsan_shadow; struct page *kmsan_origin; #endif - -#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS - int _last_cpupid; -#endif } _struct_page_alignment; /* @@ -272,6 +261,8 @@ typedef struct { * @_refcount: Do not access this member directly. Use folio_ref_count() * to find how many references there are to this folio. * @memcg_data: Memory Control Group data. + * @virtual: Virtual address in the kernel direct map. + * @_last_cpupid: IDs of last CPU and last process that accessed the folio. * @_entire_mapcount: Do not use directly, call folio_entire_mapcount(). * @_nr_pages_mapped: Do not use directly, call folio_mapcount(). * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). 
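
Because struct folio overlays struct page, every field the two structs share has to stay at the same byte offset; that is what the FOLIO_MATCH() static asserts in the next hunk enforce for the newly mirrored @virtual and @_last_cpupid fields. A stripped-down illustration of the pattern, using hypothetical struct names rather than the real ones:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* Two views of the same memory, as with struct page and struct folio. */
	struct demo_page  { unsigned long flags; void *virtual; int _last_cpupid; };
	struct demo_folio { unsigned long flags; void *virtual; int _last_cpupid; };

	/* Compile-time check that an aliased field sits at the same offset. */
	#define DEMO_MATCH(fld) \
		static_assert(offsetof(struct demo_page, fld) == \
			      offsetof(struct demo_folio, fld))

	DEMO_MATCH(virtual);
	DEMO_MATCH(_last_cpupid);
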
@@ -318,6 +309,12 @@ struct folio { #ifdef CONFIG_MEMCG unsigned long memcg_data; #endif +#if defined(WANT_PAGE_VIRTUAL) + void *virtual; +#endif +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; +#endif /* private: the union with struct page is transitional */ }; struct page page; @@ -373,6 +370,12 @@ FOLIO_MATCH(_refcount, _refcount); #ifdef CONFIG_MEMCG FOLIO_MATCH(memcg_data, memcg_data); #endif +#if defined(WANT_PAGE_VIRTUAL) +FOLIO_MATCH(virtual, virtual); +#endif +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS +FOLIO_MATCH(_last_cpupid, _last_cpupid); +#endif #undef FOLIO_MATCH #define FOLIO_MATCH(pg, fl) \ static_assert(offsetof(struct folio, fl) == \ @@ -398,11 +401,11 @@ FOLIO_MATCH(compound_head, _head_2a); * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs. * @__page_mapping: Aliases with page->mapping. Unused for page tables. * @pt_mm: Used for x86 pgds. - * @pt_frag_refcount: For fragmented page table tracking. Powerpc and s390 only. + * @pt_frag_refcount: For fragmented page table tracking. Powerpc only. * @_pt_pad_2: Padding to ensure proper alignment. * @ptl: Lock for the page table. * @__page_type: Same as page->page_type. Unused for page tables. - * @_refcount: Same as page refcount. Used for s390 page tables. + * @__page_refcount: Same as page refcount. * @pt_memcg_data: Memcg data. Tracked for page tables here. * * This struct overlays struct page for now. Do not modify without a good @@ -435,7 +438,7 @@ struct ptdesc { #endif }; unsigned int __page_type; - atomic_t _refcount; + atomic_t __page_refcount; #ifdef CONFIG_MEMCG unsigned long pt_memcg_data; #endif @@ -449,7 +452,7 @@ TABLE_MATCH(compound_head, _pt_pad_1); TABLE_MATCH(mapping, __page_mapping); TABLE_MATCH(rcu_head, pt_rcu_head); TABLE_MATCH(page_type, __page_type); -TABLE_MATCH(_refcount, _refcount); +TABLE_MATCH(_refcount, __page_refcount); #ifdef CONFIG_MEMCG TABLE_MATCH(memcg_data, pt_memcg_data); #endif @@ -546,14 +549,65 @@ struct anon_vma_name { char name[]; }; +#ifdef CONFIG_ANON_VMA_NAME +/* + * mmap_lock should be read-locked when calling anon_vma_name(). Caller should + * either keep holding the lock while using the returned pointer or it should + * raise anon_vma_name refcount before releasing the lock. + */ +struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma); +struct anon_vma_name *anon_vma_name_alloc(const char *name); +void anon_vma_name_free(struct kref *kref); +#else /* CONFIG_ANON_VMA_NAME */ +static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) +{ + return NULL; +} + +static inline struct anon_vma_name *anon_vma_name_alloc(const char *name) +{ + return NULL; +} +#endif + struct vma_lock { struct rw_semaphore lock; }; struct vma_numab_state { + /* + * Initialised as time in 'jiffies' after which VMA + * should be scanned. Delays first scan of new VMA by at + * least sysctl_numa_balancing_scan_delay: + */ unsigned long next_scan; - unsigned long next_pid_reset; - unsigned long access_pids[2]; + + /* + * Time in jiffies when pids_active[] is reset to + * detect phase change behaviour: + */ + unsigned long pids_active_reset; + + /* + * Approximate tracking of PIDs that trapped a NUMA hinting + * fault. May produce false positives due to hash collisions. 
+ * + * [0] Previous PID tracking + * [1] Current PID tracking + * + * Window moves after pids_active_reset has expired approximately + * every VMA_PID_RESET_PERIOD jiffies: + */ + unsigned long pids_active[2]; + + /* MM scan sequence ID when scan first started after VMA creation */ + int start_scan_seq; + + /* + * MM scan sequence ID when the VMA was last completely scanned. + * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq + */ + int prev_scan_seq; }; /* @@ -662,6 +716,12 @@ struct vm_area_struct { struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; +#ifdef CONFIG_NUMA +#define vma_policy(vma) ((vma)->vm_policy) +#else +#define vma_policy(vma) NULL +#endif + #ifdef CONFIG_SCHED_MM_CID struct mm_cid { u64 time; @@ -670,6 +730,7 @@ struct mm_cid { #endif struct kioctx_table; +struct iommu_mm_data; struct mm_struct { struct { /* @@ -881,8 +942,8 @@ struct mm_struct { #endif struct work_struct async_put_work; -#ifdef CONFIG_IOMMU_SVA - u32 pasid; +#ifdef CONFIG_IOMMU_MM_DATA + struct iommu_mm_data *iommu_mm; #endif #ifdef CONFIG_KSM /* @@ -901,7 +962,7 @@ struct mm_struct { */ unsigned long ksm_zero_pages; #endif /* CONFIG_KSM */ -#ifdef CONFIG_LRU_GEN +#ifdef CONFIG_LRU_GEN_WALKS_MMU struct { /* this mm_struct is on lru_gen_mm_list */ struct list_head list; @@ -916,7 +977,7 @@ struct mm_struct { struct mem_cgroup *memcg; #endif } lru_gen; -#endif /* CONFIG_LRU_GEN */ +#endif /* CONFIG_LRU_GEN_WALKS_MMU */ } __randomize_layout; /* @@ -954,11 +1015,13 @@ struct lru_gen_mm_list { spinlock_t lock; }; +#endif /* CONFIG_LRU_GEN */ + +#ifdef CONFIG_LRU_GEN_WALKS_MMU + void lru_gen_add_mm(struct mm_struct *mm); void lru_gen_del_mm(struct mm_struct *mm); -#ifdef CONFIG_MEMCG void lru_gen_migrate_mm(struct mm_struct *mm); -#endif static inline void lru_gen_init_mm(struct mm_struct *mm) { @@ -979,7 +1042,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm) WRITE_ONCE(mm->lru_gen.bitmap, -1); } -#else /* !CONFIG_LRU_GEN */ +#else /* !CONFIG_LRU_GEN_WALKS_MMU */ static inline void lru_gen_add_mm(struct mm_struct *mm) { @@ -989,11 +1052,9 @@ static inline void lru_gen_del_mm(struct mm_struct *mm) { } -#ifdef CONFIG_MEMCG static inline void lru_gen_migrate_mm(struct mm_struct *mm) { } -#endif static inline void lru_gen_init_mm(struct mm_struct *mm) { @@ -1003,7 +1064,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm) { } -#endif /* CONFIG_LRU_GEN */ +#endif /* CONFIG_LRU_GEN_WALKS_MMU */ struct vma_iterator { struct ma_state mas; @@ -1014,7 +1075,8 @@ struct vma_iterator { .mas = { \ .tree = &(__mm)->mm_mt, \ .index = __addr, \ - .node = MAS_START, \ + .node = NULL, \ + .status = ma_start, \ }, \ } diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index aa44fff8bb9d..a2f6179b672b 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -9,9 +9,6 @@ */ #include <linux/types.h> -#include <linux/threads.h> -#include <linux/atomic.h> -#include <linux/cpumask.h> #include <asm/page.h> @@ -36,6 +33,8 @@ enum { NR_MM_COUNTERS }; +struct page; + struct page_frag { struct page *page; #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) diff --git a/include/linux/mman.h b/include/linux/mman.h index 40d94411d492..dc7048824be8 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -156,6 +156,7 @@ calc_vm_flag_bits(unsigned long flags) return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | + _calc_vm_trans(flags, 
MAP_STACK, VM_NOHUGEPAGE) | arch_calc_vm_flag_bits(flags); } diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index daa2f40d9ce6..f34407cc2788 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -32,6 +32,7 @@ struct mmc_csd { unsigned int r2w_factor; unsigned int max_dtr; unsigned int erase_size; /* In sectors */ + unsigned int wp_grp_size; unsigned int read_blkbits; unsigned int write_blkbits; unsigned int capacity; @@ -52,9 +53,6 @@ struct mmc_ext_csd { u8 part_config; u8 cache_ctrl; u8 rst_n_function; - u8 max_packed_writes; - u8 max_packed_reads; - u8 packed_event_en; unsigned int part_time; /* Units: ms */ unsigned int sa_timeout; /* Units: 100ns */ unsigned int generic_cmd6_time; /* Units: 10ms */ @@ -295,7 +293,9 @@ struct mmc_card { #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */ #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */ +#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */ + bool written_flag; /* Indicates eMMC has been written since power on */ bool reenable_cmdq; /* Re-enable Command Queue */ unsigned int erase_size; /* erase size in sectors */ @@ -304,6 +304,7 @@ struct mmc_card { unsigned int eg_boundary; /* don't cross erase-group boundaries */ unsigned int erase_arg; /* erase / trim / discard */ u8 erased_byte; /* value of erased bytes */ + unsigned int wp_grp_size; /* write group size in sectors */ u32 raw_cid[4]; /* raw card CID */ u32 raw_csd[4]; /* raw card CSD */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 6efec0b9820c..2c7928a50907 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -27,7 +27,6 @@ struct mmc_command { u32 opcode; u32 arg; #define MMC_CMD23_ARG_REL_WR (1 << 31) -#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30)) #define MMC_CMD23_ARG_TAG_REQ (1 << 29) u32 resp[4]; unsigned int flags; /* expected response type */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 62a6847a3b6f..2f445c651742 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -526,6 +526,7 @@ struct mmc_host { /* Host Software Queue support */ bool hsq_enabled; + int hsq_depth; u32 err_stats[MMC_ERR_MAX]; unsigned long private[] ____cacheline_aligned; diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 6f7993803ee7..cf2bcb5da30d 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -257,8 +257,6 @@ static inline bool mmc_ready_for_data(u32 status) #define EXT_CSD_FLUSH_CACHE 32 /* W */ #define EXT_CSD_CACHE_CTRL 33 /* R/W */ #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ -#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */ -#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */ #define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */ #define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */ #define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ @@ -321,8 +319,6 @@ static inline bool mmc_ready_for_data(u32 status) #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ -#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ -#define EXT_CSD_MAX_PACKED_READS 501 /* RO */ #define EXT_CSD_BKOPS_SUPPORT 502 /* RO */ #define EXT_CSD_HPI_FEATURES 503 /* RO */ @@ -402,18 +398,12 @@ static inline bool mmc_ready_for_data(u32 status) #define EXT_CSD_PWR_CL_8BIT_SHIFT 4 #define EXT_CSD_PWR_CL_4BIT_SHIFT 0 -#define 
EXT_CSD_PACKED_EVENT_EN BIT(3) - /* * EXCEPTION_EVENT_STATUS field */ #define EXT_CSD_URGENT_BKOPS BIT(0) #define EXT_CSD_DYNCAP_NEEDED BIT(1) #define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2) -#define EXT_CSD_PACKED_FAILURE BIT(3) - -#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0) -#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1) /* * BKOPS status level diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 6e3c857606f1..f349e08a9dfe 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -459,7 +459,14 @@ mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) lock_map_release(&__mmu_notifier_invalidate_range_start_map); } -static inline int +/* + * This version of mmu_notifier_invalidate_range_start() avoids blocking, but it + * can return an error if a notifier can't proceed without blocking, in which + * case you're not allowed to modify PTEs in the specified range. + * + * This is mainly intended for OOM handling. + */ +static inline int __must_check mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) { int ret = 0; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 4106fbc5b4b3..a497f189d988 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -22,18 +22,21 @@ #include <linux/mm_types.h> #include <linux/page-flags.h> #include <linux/local_lock.h> +#include <linux/zswap.h> #include <asm/page.h> /* Free memory management - zoned buddy allocator. */ #ifndef CONFIG_ARCH_FORCE_MAX_ORDER -#define MAX_ORDER 10 +#define MAX_PAGE_ORDER 10 #else -#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER +#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER #endif -#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER) +#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER) #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES) +#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1) + /* * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed * costly to service. That is between allocation orders which should @@ -95,7 +98,7 @@ static inline bool migratetype_is_mergeable(int mt) } #define for_each_migratetype_order(order, type) \ - for (order = 0; order <= MAX_ORDER; order++) \ + for (order = 0; order < NR_PAGE_ORDERS; order++) \ for (type = 0; type < MIGRATE_TYPES; type++) extern int page_group_by_mobility_disabled; @@ -207,6 +210,10 @@ enum node_stat_item { PGPROMOTE_SUCCESS, /* promote successfully */ PGPROMOTE_CANDIDATE, /* candidate pages to promote */ #endif + /* PGDEMOTE_*: pages demoted */ + PGDEMOTE_KSWAPD, + PGDEMOTE_DIRECT, + PGDEMOTE_KHUGEPAGED, NR_VM_NODE_STAT_ITEMS }; @@ -435,14 +442,12 @@ struct lru_gen_folio { atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; /* whether the multi-gen LRU is enabled */ bool enabled; -#ifdef CONFIG_MEMCG /* the memcg generation this lru_gen_folio belongs to */ u8 gen; /* the list segment this lru_gen_folio belongs to */ u8 seg; /* per-node lru_gen_folio list for global reclaim */ struct hlist_nulls_node list; -#endif }; enum { @@ -488,11 +493,6 @@ struct lru_gen_mm_walk { bool force_scan; }; -void lru_gen_init_lruvec(struct lruvec *lruvec); -void lru_gen_look_around(struct page_vma_mapped_walk *pvmw); - -#ifdef CONFIG_MEMCG - /* * For each node, memcgs are divided into two generations: the old and the * young. For each generation, memcgs are randomly sharded into multiple bins @@ -505,33 +505,37 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw); * the old generation, is incremented when all its bins become empty. 
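 * (When walking a generation, a reclaimer starts from a random bin, so
 * concurrent reclaimers tend to spread across different memcgs.)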
* * There are four operations: - * 1. MEMCG_LRU_HEAD, which moves an memcg to the head of a random bin in its + * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its * current generation (old or young) and updates its "seg" to "head"; - * 2. MEMCG_LRU_TAIL, which moves an memcg to the tail of a random bin in its + * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its * current generation (old or young) and updates its "seg" to "tail"; - * 3. MEMCG_LRU_OLD, which moves an memcg to the head of a random bin in the old + * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old * generation, updates its "gen" to "old" and resets its "seg" to "default"; - * 4. MEMCG_LRU_YOUNG, which moves an memcg to the tail of a random bin in the + * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the * young generation, updates its "gen" to "young" and resets its "seg" to * "default". * * The events that trigger the above operations are: * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD; - * 2. The first attempt to reclaim an memcg below low, which triggers + * 2. The first attempt to reclaim a memcg below low, which triggers * MEMCG_LRU_TAIL; - * 3. The first attempt to reclaim an memcg below reclaimable size threshold, - * which triggers MEMCG_LRU_TAIL; - * 4. The second attempt to reclaim an memcg below reclaimable size threshold, - * which triggers MEMCG_LRU_YOUNG; - * 5. Attempting to reclaim an memcg below min, which triggers MEMCG_LRU_YOUNG; + * 3. The first attempt to reclaim a memcg offlined or below reclaimable size + * threshold, which triggers MEMCG_LRU_TAIL; + * 4. The second attempt to reclaim a memcg offlined or below reclaimable size + * threshold, which triggers MEMCG_LRU_YOUNG; + * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG; * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG; - * 7. Offlining an memcg, which triggers MEMCG_LRU_OLD. + * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD. * - * Note that memcg LRU only applies to global reclaim, and the round-robin - * incrementing of their max_seq counters ensures the eventual fairness to all - * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter(). + * Notes: + * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing + * of their max_seq counters ensures the eventual fairness to all eligible + * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter(). + * 2. There are only two valid generations: old (seq) and young (seq+1). + * MEMCG_NR_GENS is set to three so that when reading the generation counter + * locklessly, a stale value (seq-1) does not wrap around to young. 
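+ * + * To see why two slots are not enough: generation numbers are reduced + * modulo MEMCG_NR_GENS to pick a slot, and with only two slots a stale + * seq - 1 and the young seq + 1 alias ((seq - 1) % 2 == (seq + 1) % 2). + * With three slots, seq - 1, seq and seq + 1 all map to distinct values, + * so a stale read cannot masquerade as the young generation.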
*/ -#define MEMCG_NR_GENS 2 +#define MEMCG_NR_GENS 3 #define MEMCG_NR_BINS 8 struct lru_gen_memcg { @@ -546,6 +550,8 @@ struct lru_gen_memcg { }; void lru_gen_init_pgdat(struct pglist_data *pgdat); +void lru_gen_init_lruvec(struct lruvec *lruvec); +void lru_gen_look_around(struct page_vma_mapped_walk *pvmw); void lru_gen_init_memcg(struct mem_cgroup *memcg); void lru_gen_exit_memcg(struct mem_cgroup *memcg); @@ -554,19 +560,6 @@ void lru_gen_offline_memcg(struct mem_cgroup *memcg); void lru_gen_release_memcg(struct mem_cgroup *memcg); void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid); -#else /* !CONFIG_MEMCG */ - -#define MEMCG_NR_GENS 1 - -struct lru_gen_memcg { -}; - -static inline void lru_gen_init_pgdat(struct pglist_data *pgdat) -{ -} - -#endif /* CONFIG_MEMCG */ - #else /* !CONFIG_LRU_GEN */ static inline void lru_gen_init_pgdat(struct pglist_data *pgdat) @@ -581,8 +574,6 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) { } -#ifdef CONFIG_MEMCG - static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) { } @@ -607,8 +598,6 @@ static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) { } -#endif /* CONFIG_MEMCG */ - #endif /* CONFIG_LRU_GEN */ struct lruvec { @@ -631,16 +620,17 @@ struct lruvec { #ifdef CONFIG_LRU_GEN /* evictable pages divided into generations */ struct lru_gen_folio lrugen; +#ifdef CONFIG_LRU_GEN_WALKS_MMU /* to concurrently iterate lru_gen_mm_list */ struct lru_gen_mm_state mm_state; #endif +#endif /* CONFIG_LRU_GEN */ #ifdef CONFIG_MEMCG struct pglist_data *pgdat; #endif + struct zswap_lruvec_state zswap_lruvec_state; }; -/* Isolate unmapped pages */ -#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) /* Isolate for asynchronous migration */ #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) /* Isolate unevictable pages */ @@ -676,15 +666,34 @@ enum zone_watermarks { #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) +/* + * Flags used in pcp->flags field. + * + * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed by the + * previous page free. Used to avoid draining the PCP for an + * occasional high-order page free. + * + * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before + * draining it on consecutive high-order page frees with no intervening + * allocation, if the CPU's data cache slice is large enough. This + * reduces zone lock contention and keeps cache-hot pages available for + * reuse. 
+ */ +#define PCPF_PREV_FREE_HIGH_ORDER BIT(0) +#define PCPF_FREE_HIGH_BATCH BIT(1) + struct per_cpu_pages { spinlock_t lock; /* Protects lists field */ int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ + int high_min; /* min high watermark */ + int high_max; /* max high watermark */ int batch; /* chunk size for buddy add/remove */ - short free_factor; /* batch scaling factor during free */ + u8 flags; /* protected by pcp->lock */ + u8 alloc_factor; /* batch scaling factor during allocate */ #ifdef CONFIG_NUMA - short expire; /* When 0, remote pagesets are drained */ + u8 expire; /* When 0, remote pagesets are drained */ #endif + short free_count; /* consecutive free count */ /* Lists of pages, one per migrate type stored on the pcp-lists */ struct list_head lists[NR_PCP_LISTS]; @@ -837,7 +846,8 @@ struct zone { * the high and batch values are copied to individual pagesets for * faster access */ - int pageset_high; + int pageset_high_min; + int pageset_high_max; int pageset_batch; #ifndef CONFIG_SPARSEMEM @@ -925,10 +935,10 @@ struct zone { CACHELINE_PADDING(_pad1_); /* free areas of different sizes */ - struct free_area free_area[MAX_ORDER + 1]; + struct free_area free_area[NR_PAGE_ORDERS]; #ifdef CONFIG_UNACCEPTED_MEMORY - /* Pages to be accepted. All pages on the list are MAX_ORDER */ + /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */ struct list_head unaccepted_pages; #endif @@ -998,6 +1008,7 @@ enum zone_flags { * Cleared when kswapd is woken. */ ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */ + ZONE_BELOW_HIGH, /* zone is below high watermark. */ }; static inline unsigned long zone_managed_pages(struct zone *zone) @@ -1737,8 +1748,8 @@ static inline bool movable_only_nodes(nodemask_t *nodes) #define SECTION_BLOCKFLAGS_BITS \ ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) -#if (MAX_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS -#error Allocator MAX_ORDER exceeds SECTION_SIZE +#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS +#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE #endif static inline unsigned long pfn_to_section_nr(unsigned long pfn) @@ -1770,6 +1781,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec) #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) struct mem_section_usage { + struct rcu_head rcu; #ifdef CONFIG_SPARSEMEM_VMEMMAP DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); #endif @@ -1963,7 +1975,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) { int idx = subsection_map_index(pfn); - return test_bit(idx, ms->usage->subsection_map); + return test_bit(idx, READ_ONCE(ms->usage)->subsection_map); } #else static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) @@ -1987,6 +1999,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) static inline int pfn_valid(unsigned long pfn) { struct mem_section *ms; + int ret; /* * Ensure the upper PAGE_SHIFT bits are clear in the @@ -2000,13 +2013,19 @@ static inline int pfn_valid(unsigned long pfn) if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; ms = __pfn_to_section(pfn); - if (!valid_section(ms)) + rcu_read_lock_sched(); + if (!valid_section(ms)) { + rcu_read_unlock_sched(); return 0; + } /* * Traditionally early sections always returned pfn_valid() for * the entire section-sized span. 
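 * The lookup now runs under rcu_read_lock_sched(): with struct
 * mem_section_usage freed via the new rcu_head, holding the read-side
 * critical section keeps ms->usage alive from the valid_section()
 * check through the subsection test.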
*/ - return early_section(ms) || pfn_section_valid(ms, pfn); + ret = early_section(ms) || pfn_section_valid(ms, pfn); + rcu_read_unlock_sched(); + + return ret; } #endif diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h index 057c89867aa2..cd4d5c8781f5 100644 --- a/include/linux/mnt_idmapping.h +++ b/include/linux/mnt_idmapping.h @@ -115,6 +115,9 @@ static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid) int vfsgid_in_group_p(vfsgid_t vfsgid); +struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap); +void mnt_idmap_put(struct mnt_idmap *idmap); + vfsuid_t make_vfsuid(struct mnt_idmap *idmap, struct user_namespace *fs_userns, kuid_t kuid); @@ -241,7 +244,4 @@ static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap, return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid())); } -bool check_fsmapping(const struct mnt_idmap *idmap, - const struct super_block *sb); - #endif /* _LINUX_MNT_IDMAPPING_H */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b0678b093cb2..f458469c5ce5 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -935,6 +935,12 @@ enum { * struct cdx_device_id - CDX device identifier * @vendor: Vendor ID * @device: Device ID + * @subvendor: Subsystem vendor ID (or CDX_ANY_ID) + * @subdevice: Subsystem device ID (or CDX_ANY_ID) + * @class: Device class + * Most drivers do not need to specify class/class_mask + * as vendor/device is normally sufficient. + * @class_mask: Limit which sub-fields of the class field are compared. * @override_only: Match only when dev->driver_override is this driver. * * Type of entries in the "device Id" table for CDX devices supported by @@ -943,7 +949,15 @@ enum { struct cdx_device_id { __u16 vendor; __u16 device; + __u16 subvendor; + __u16 subdevice; + __u32 class; + __u32 class_mask; __u32 override_only; }; +struct vchiq_device_id { + char name[32]; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index a98e188cf37b..96bc462872c0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -540,6 +540,8 @@ struct module { struct static_call_site *static_call_sites; #endif #if IS_ENABLED(CONFIG_KUNIT) + int num_kunit_init_suites; + struct kunit_suite **kunit_init_suites; int num_kunit_suites; struct kunit_suite **kunit_suites; #endif @@ -668,7 +670,7 @@ extern void __module_get(struct module *module); * @module: the module we should check for * * Only try to get a module reference count if the module is not being removed. - * This call will fail if the module is already being removed. + * This call will fail if the module is in the process of being removed. * * Care must also be taken to ensure the module exists and is alive prior to * usage of this call. This can be guaranteed through two means: diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h index 1269543d0634..77c9895b9ddb 100644 --- a/include/linux/module_symbol.h +++ b/include/linux/module_symbol.h @@ -3,7 +3,7 @@ #define _LINUX_MODULE_SYMBOL_H /* This ignores the intensely annoying "mapping symbols" found in ELF files. */ -static inline int is_mapping_symbol(const char *str) +static inline bool is_mapping_symbol(const char *str) { if (str[0] == '.' 
&& str[1] == 'L') return true; diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 962cd41a2cb5..bfb85fd13e1f 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -276,7 +276,7 @@ struct kparam_array read-only sections (which is part of respective UNIX ABI on these platforms). So 'const' makes no sense and even causes compile failures with some compilers. */ -#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) +#if defined(CONFIG_ALPHA) || defined(CONFIG_PPC64) #define __moduleparam_const #else #define __moduleparam_const const @@ -293,7 +293,11 @@ struct kparam_array = { __param_str_##name, THIS_MODULE, ops, \ VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } } -/* Obsolete - use module_param_cb() */ +/* + * Useful for describing a set/get pair used only once (i.e. for this + * parameter). For repeated set/get pairs (i.e. the same struct + * kernel_param_ops), use module_param_cb() instead. + */ #define module_param_call(name, _set, _get, arg, perm) \ static const struct kernel_param_ops __param_ops_##name = \ { .flags = 0, .set = _set, .get = _get }; \ @@ -381,6 +385,8 @@ extern bool parameq(const char *name1, const char *name2); */ extern bool parameqn(const char *name1, const char *name2, size_t n); +typedef int (*parse_unknown_fn)(char *param, char *val, const char *doing, void *arg); + /* Called on module insert or kernel boot */ extern char *parse_args(const char *name, char *args, @@ -388,9 +394,7 @@ extern char *parse_args(const char *name, unsigned num, s16 level_min, s16 level_max, - void *arg, - int (*unknown)(char *param, char *val, - const char *doing, void *arg)); + void *arg, parse_unknown_fn unknown); /* Called by module remove. */ #ifdef CONFIG_SYSFS diff --git a/include/linux/mount.h b/include/linux/mount.h index 4f40b40306d0..c34c18b4e8f3 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -50,8 +50,7 @@ struct path; #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ - MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \ - MNT_CURSOR) + MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB) #define MNT_INTERNAL 0x4000 @@ -65,7 +64,7 @@ struct path; #define MNT_SYNC_UMOUNT 0x2000000 #define MNT_MARKED 0x4000000 #define MNT_UMOUNT 0x8000000 -#define MNT_CURSOR 0x10000000 +#define MNT_ONRB 0x10000000 struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ @@ -92,8 +91,8 @@ extern bool __mnt_is_readonly(struct vfsmount *mnt); extern bool mnt_may_suid(struct vfsmount *mnt); extern struct vfsmount *clone_private_mount(const struct path *path); -extern int __mnt_want_write(struct vfsmount *); -extern void __mnt_drop_write(struct vfsmount *); +int mnt_get_write_access(struct vfsmount *mnt); +void mnt_put_write_access(struct vfsmount *mnt); extern struct vfsmount *fc_mount(struct fs_context *fc); extern struct vfsmount *vfs_create_mount(struct fs_context *fc); diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h index 79184948fab4..ac577699edfd 100644 --- a/include/linux/moxtet.h +++ b/include/linux/moxtet.h @@ -35,8 +35,6 @@ enum turris_mox_module_id { #define MOXTET_NIRQS 16 -extern struct bus_type moxtet_type; - struct moxtet { struct device *dev; struct mutex lock; diff --git a/include/linux/msi.h b/include/linux/msi.h index a50ea79522f8..ddace8c34dcf 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -547,12 +547,6 @@ enum { MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 
5), /* Free MSI descriptors */ MSI_FLAG_FREE_MSI_DESCS = (1 << 6), - /* - * Quirk to handle MSI implementations which do not provide - * masking. Currently known to affect x86, but has to be partially - * handled in the core MSI code. - */ - MSI_FLAG_NOMASK_QUIRK = (1 << 7), /* Mask for the generic functionality */ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0), diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index d88bb56c18e2..947410faf9e2 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -287,7 +287,7 @@ struct cfi_private { unsigned long chipshift; /* Because they're of the same type */ const char *im_name; /* inter_module name for cmdset_setup */ unsigned long quirks; - struct flchip chips[]; /* per-chip data structure for each chip */ + struct flchip chips[] __counted_by(numchips); /* per-chip data structure for each chip */ }; uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h index 0b6b59f7cfbd..56047a4e54c9 100644 --- a/include/linux/mtd/jedec.h +++ b/include/linux/mtd/jedec.h @@ -21,6 +21,9 @@ struct jedec_ecc_info { /* JEDEC features */ #define JEDEC_FEATURE_16_BIT_BUS (1 << 0) +/* JEDEC Optional Commands */ +#define JEDEC_OPT_CMD_READ_CACHE BIT(1) + struct nand_jedec_params { /* rev info and features block */ /* 'J' 'E' 'S' 'D' */ diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h index a7376f9beddf..55ab2e4d62f9 100644 --- a/include/linux/mtd/onfi.h +++ b/include/linux/mtd/onfi.h @@ -55,6 +55,7 @@ #define ONFI_SUBFEATURE_PARAM_LEN 4 /* ONFI optional commands SET/GET FEATURES supported? */ +#define ONFI_OPT_CMD_READ_CACHE BIT(1) #define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2) struct nand_onfi_params { diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index 2e3f43788d48..0421f12156b5 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -24,7 +24,7 @@ struct lpddr_private { struct qinfo_chip *qinfo; int numchips; unsigned long chipshift; - struct flchip chips[]; + struct flchip chips[] __counted_by(numchips); }; /* qinfo_query_info structure contains request information for diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 90a141ba2a5a..e84522e31301 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -225,6 +225,7 @@ struct gpio_desc; * struct nand_parameters - NAND generic parameters from the parameter page * @model: Model name * @supports_set_get_features: The NAND chip supports setting/getting features + * @supports_read_cache: The NAND chip supports read cache operations * @set_feature_list: Bitmap of features that can be set * @get_feature_list: Bitmap of features that can be retrieved * @onfi: ONFI specific parameters @@ -233,6 +234,7 @@ struct nand_parameters { /* Generic parameters */ const char *model; bool supports_set_get_features; + bool supports_read_cache; DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER); DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER); @@ -1001,6 +1003,8 @@ struct nand_op_parser { /** * struct nand_operation - NAND operation descriptor * @cs: the CS line to select for this NAND operation + * @deassert_wp: set to true when the operation requires the WP pin to be + * de-asserted (ERASE, PROG, ...)
* @instrs: array of instructions to execute * @ninstrs: length of the @instrs array * @@ -1008,6 +1012,7 @@ struct nand_op_parser { */ struct nand_operation { unsigned int cs; + bool deassert_wp; const struct nand_op_instr *instrs; unsigned int ninstrs; }; @@ -1019,6 +1024,14 @@ struct nand_operation { .ninstrs = ARRAY_SIZE(_instrs), \ } +#define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \ + { \ + .cs = _cs, \ + .deassert_wp = true, \ + .instrs = _instrs, \ + .ninstrs = ARRAY_SIZE(_instrs), \ + } + int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_op_parser *parser, const struct nand_operation *op, bool check_only); @@ -1102,6 +1115,7 @@ struct nand_controller_ops { * the bus without restarting an entire read operation nor * changing the column. * @supported_op.cont_read: The controller supports sequential cache reads. + * @controller_wp: the controller is in charge of handling the WP pin. */ struct nand_controller { struct mutex lock; @@ -1110,6 +1124,7 @@ struct nand_controller { unsigned int data_only_read: 1; unsigned int cont_read: 1; } supported_op; + bool controller_wp; }; static inline void nand_controller_init(struct nand_controller *nfc) @@ -1263,6 +1278,7 @@ struct nand_secure_region { * @cont_read: Sequential page read internals * @cont_read.ongoing: Whether a continuous read is ongoing or not * @cont_read.first_page: Start of the continuous read operation + * @cont_read.pause_page: End of the current sequential cache read operation * @cont_read.last_page: End of the continuous read operation * @controller: The hardware controller structure which is shared among multiple * independent devices @@ -1319,6 +1335,7 @@ struct nand_chip { struct { bool ongoing; unsigned int first_page; + unsigned int pause_page; unsigned int last_page; } cont_read; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 3e285c09d16d..badb4c1ac079 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -263,6 +263,7 @@ struct spinand_manufacturer { extern const struct spinand_manufacturer alliancememory_spinand_manufacturer; extern const struct spinand_manufacturer ato_spinand_manufacturer; extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer; +extern const struct spinand_manufacturer foresee_spinand_manufacturer; extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; diff --git a/include/linux/mutex.h b/include/linux/mutex.h index a33aa9eb9fc3..7e208d46ba5b 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -20,6 +20,7 @@ #include <linux/osq_lock.h> #include <linux/debug_locks.h> #include <linux/cleanup.h> +#include <linux/mutex_types.h> #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ @@ -33,49 +34,6 @@ #ifndef CONFIG_PREEMPT_RT -/* - * Simple, straightforward mutexes with strict semantics: - * - * - only one task can hold the mutex at a time - * - only the owner can unlock the mutex - * - multiple unlocks are not permitted - * - recursive locking is not permitted - * - a mutex object must be initialized via the API - * - a mutex object must not be initialized via memset or copying - * - task may not exit with mutex held - * - memory areas where held locks reside must not be freed - * - held mutexes must not be reinitialized - * - mutexes may not be used in hardware or software interrupt - * contexts such as tasklets 
and timers - * - * These semantics are fully enforced when DEBUG_MUTEXES is - * enabled. Furthermore, besides enforcing the above rules, the mutex - * debugging code also implements a number of additional features - * that make lock debugging easier and faster: - * - * - uses symbolic names of mutexes, whenever they are printed in debug output - * - point-of-acquire tracking, symbolic lookup of function names - * - list of all locks held in the system, printout of them - * - owner tracking - * - detects self-recursing locks and prints out all relevant info - * - detects multi-task circular deadlocks and prints out all affected - * locks and tasks (and only those tasks) - */ -struct mutex { - atomic_long_t owner; - raw_spinlock_t wait_lock; -#ifdef CONFIG_MUTEX_SPIN_ON_OWNER - struct optimistic_spin_queue osq; /* Spinner MCS lock */ -#endif - struct list_head wait_list; -#ifdef CONFIG_DEBUG_MUTEXES - void *magic; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - #ifdef CONFIG_DEBUG_MUTEXES #define __DEBUG_MUTEX_INITIALIZER(lockname) \ @@ -131,14 +89,6 @@ extern bool mutex_is_locked(struct mutex *lock); /* * Preempt-RT variant based on rtmutexes. */ -#include <linux/rtmutex.h> - -struct mutex { - struct rt_mutex_base rtmutex; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; #define __MUTEX_INITIALIZER(mutexname) \ { \ @@ -221,6 +171,7 @@ extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T)) -DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T)) +DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T)) +DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0) #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h new file mode 100644 index 000000000000..fdf7f515fde8 --- /dev/null +++ b/include/linux/mutex_types.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MUTEX_TYPES_H +#define __LINUX_MUTEX_TYPES_H + +#include <linux/atomic.h> +#include <linux/lockdep_types.h> +#include <linux/osq_lock.h> +#include <linux/spinlock_types.h> +#include <linux/types.h> + +#ifndef CONFIG_PREEMPT_RT + +/* + * Simple, straightforward mutexes with strict semantics: + * + * - only one task can hold the mutex at a time + * - only the owner can unlock the mutex + * - multiple unlocks are not permitted + * - recursive locking is not permitted + * - a mutex object must be initialized via the API + * - a mutex object must not be initialized via memset or copying + * - task may not exit with mutex held + * - memory areas where held locks reside must not be freed + * - held mutexes must not be reinitialized + * - mutexes may not be used in hardware or software interrupt + * contexts such as tasklets and timers + * + * These semantics are fully enforced when DEBUG_MUTEXES is + * enabled. 
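Illustration (not part of the diff): a minimal sketch of the strict usage these rules describe; the lock and function names are invented.

/* Sketch only: initialize via the API, lock and unlock in the
 * same task, never copy or re-initialize the object. */
static DEFINE_MUTEX(example_lock);

static void example_update(int *val)
{
    mutex_lock(&example_lock);
    (*val)++;
    mutex_unlock(&example_lock);
}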
Furthermore, besides enforcing the above rules, the mutex + * debugging code also implements a number of additional features + * that make lock debugging easier and faster: + * + * - uses symbolic names of mutexes, whenever they are printed in debug output + * - point-of-acquire tracking, symbolic lookup of function names + * - list of all locks held in the system, printout of them + * - owner tracking + * - detects self-recursing locks and prints out all relevant info + * - detects multi-task circular deadlocks and prints out all affected + * locks and tasks (and only those tasks) + */ +struct mutex { + atomic_long_t owner; + raw_spinlock_t wait_lock; +#ifdef CONFIG_MUTEX_SPIN_ON_OWNER + struct optimistic_spin_queue osq; /* Spinner MCS lock */ +#endif + struct list_head wait_list; +#ifdef CONFIG_DEBUG_MUTEXES + void *magic; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#else /* !CONFIG_PREEMPT_RT */ +/* + * Preempt-RT variant based on rtmutexes. + */ +#include <linux/rtmutex.h> + +struct mutex { + struct rt_mutex_base rtmutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#endif /* CONFIG_PREEMPT_RT */ + +#endif /* __LINUX_MUTEX_TYPES_H */ diff --git a/include/linux/namei.h b/include/linux/namei.h index 1463cbda4888..74e0cc14ebf8 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -66,6 +66,7 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int); extern void done_path_create(struct path *, struct dentry *); extern struct dentry *kern_path_locked(const char *, struct path *); +extern struct dentry *user_path_locked_at(int , const char __user *, struct path *); int vfs_path_parent_lookup(struct filename *filename, unsigned int flags, struct path *parent, struct qstr *last, int *type, const struct path *root); @@ -92,6 +93,30 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *); extern struct dentry *lock_rename_child(struct dentry *, struct dentry *); extern void unlock_rename(struct dentry *, struct dentry *); +/** + * mode_strip_umask - handle vfs umask stripping + * @dir: parent directory of the new inode + * @mode: mode of the new inode to be created in @dir + * + * In most filesystems, umask stripping depends on whether or not the + * filesystem supports POSIX ACLs. If the filesystem doesn't support it umask + * stripping is done directly in here. If the filesystem does support POSIX + * ACLs umask stripping is deferred until the filesystem calls + * posix_acl_create(). + * + * Some filesystems (like NFSv4) also want to avoid umask stripping by the + * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK + * to get this effect without declaring that they support POSIX ACLs. 
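Illustration (not part of the diff): a hedged sketch of the helper in use in a filesystem create path. The function is hypothetical, and in-tree filesystems normally reach this stripping via the VFS rather than calling it directly.

/* Hypothetical ->create() fragment: strip the umask from the
 * requested mode unless POSIX ACLs (or SB_I_NOUMASK) defer it. */
static int myfs_create(struct mnt_idmap *idmap, struct inode *dir,
                       struct dentry *dentry, umode_t mode, bool excl)
{
    mode = mode_strip_umask(dir, mode);
    /* ... allocate and fill the new inode using 'mode' ... */
    return 0;
}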
+ * + * Returns: mode + */ +static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode) +{ + if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK)) + mode &= ~current_umask(); + return mode; +} + extern int __must_check nd_jump_link(const struct path *path); static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) @@ -112,7 +137,7 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) static inline bool retry_estale(const long error, const unsigned int flags) { - return error == -ESTALE && !(flags & LOOKUP_REVAL); + return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL)); } #endif /* _LINUX_NAMEI_H */ diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index ed42bd5f639f..0aa4411528fc 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -45,7 +45,7 @@ struct i40e_qv_info { struct i40e_qvlist_info { u32 num_vectors; - struct i40e_qv_info qv_info[]; + struct i40e_qv_info qv_info[] __counted_by(num_vectors); }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0896aaa91dd7..118c40258d07 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -79,6 +79,8 @@ struct xdp_buff; struct xdp_frame; struct xdp_metadata_ops; struct xdp_md; +/* DPLL specific */ +struct dpll_pin; typedef u32 xdp_features_t; @@ -380,6 +382,7 @@ struct napi_struct { /* control-path-only fields follow */ struct list_head dev_list; struct hlist_node napi_hash_node; + int irq; }; enum { @@ -480,6 +483,29 @@ static inline bool napi_prefer_busy_poll(struct napi_struct *n) return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); } +/** + * napi_is_scheduled - test if NAPI is scheduled + * @n: NAPI context + * + * This check is "best-effort". With no locking implemented, + * a NAPI can be scheduled or terminate right after this check + * and produce imprecise results. + * + * NAPI_STATE_SCHED is an internal state; napi_is_scheduled + * should not normally be used, and napi_schedule should be + * used instead. + * + * Use it only if the driver really needs to check whether a NAPI + * is scheduled, for example in the context of a delayed timer + * that can be skipped if a NAPI is already scheduled. + * + * Return true if NAPI is scheduled, false otherwise. + */ +static inline bool napi_is_scheduled(struct napi_struct *n) +{ + return test_bit(NAPI_STATE_SCHED, &n->state); +} + bool napi_schedule_prep(struct napi_struct *n); /** @@ -488,11 +514,18 @@ bool napi_schedule_prep(struct napi_struct *n); * * Schedule NAPI poll routine to be called if it is not already * running. + * Return true if the NAPI was scheduled, false otherwise. + * Refer to napi_schedule_prep() for additional reasons why + * a NAPI might not be scheduled. */ -static inline void napi_schedule(struct napi_struct *n) +static inline bool napi_schedule(struct napi_struct *n) { - if (napi_schedule_prep(n)) + if (napi_schedule_prep(n)) { __napi_schedule(n); + return true; + } + + return false; } /** @@ -507,16 +540,6 @@ static inline void napi_schedule_irqoff(struct napi_struct *n) __napi_schedule_irqoff(n); }
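Illustration (not part of the diff): with napi_schedule() now returning a bool, the pattern served by the napi_reschedule() helper removed just below can be written directly. struct my_nic and its helpers are hypothetical.

/* Hypothetical IRQ handler: schedule NAPI, and mask device
 * interrupts only if this call actually armed polling. */
static irqreturn_t my_nic_irq(int irq, void *data)
{
    struct my_nic *nic = data;    /* embeds a struct napi_struct napi */

    if (napi_schedule(&nic->napi))
        my_nic_mask_irqs(nic);    /* hypothetical register write */

    return IRQ_HANDLED;
}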
-/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ -static inline bool napi_reschedule(struct napi_struct *napi) -{ - if (napi_schedule_prep(napi)) { - __napi_schedule(napi); - return true; - } - return false; -} - /** * napi_complete_done - NAPI processing complete * @n: NAPI context @@ -643,6 +666,10 @@ struct netdev_queue { #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif + /* NAPI instance for the queue * Readers and writers must hold RTNL */ + struct napi_struct *napi; /* * write-mostly part */ @@ -917,6 +944,7 @@ struct net_device_path { u8 queue; u16 wcid; u8 bss; + u8 amsdu; } mtk_wdma; }; }; @@ -1287,9 +1315,7 @@ struct netdev_net_notifier { * struct net_device *dev, * const unsigned char *addr, u16 vid) * Deletes the FDB entry from dev corresponding to addr. - * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[], - * struct net_device *dev, - * u16 vid, + * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, * struct netlink_ext_ack *extack); * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, * struct net_device *dev, struct net_device *filter_dev, @@ -1303,6 +1329,9 @@ struct netdev_net_notifier { * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[], * struct netlink_ext_ack *extack); * Deletes the MDB entry from dev. + * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[], + * struct netlink_ext_ack *extack); + * Bulk deletes MDB entries from dev. * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb, * struct netlink_callback *cb); * Dumps MDB entries from dev. The first argument (marker) in the netlink @@ -1564,10 +1593,8 @@ struct net_device_ops { struct net_device *dev, const unsigned char *addr, u16 vid, struct netlink_ext_ack *extack); - int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, - struct nlattr *tb[], + int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, - u16 vid, struct netlink_ext_ack *extack); int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, @@ -1587,9 +1614,16 @@ struct net_device_ops { int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack); + int (*ndo_mdb_del_bulk)(struct net_device *dev, + struct nlattr *tb[], + struct netlink_ext_ack *extack); int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb, struct netlink_callback *cb); + int (*ndo_mdb_get)(struct net_device *dev, + struct nlattr *tb[], u32 portid, + u32 seq, + struct netlink_ext_ack *extack); int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, u16 flags, @@ -1774,6 +1808,13 @@ enum netdev_ml_priv_type { ML_PRIV_CAN, }; +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE, + NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */ + NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */ + NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */ +}; + /** * struct net_device - The DEVICE structure. * @@ -1835,6 +1876,7 @@ enum netdev_ml_priv_type { * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. + * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks.
* @ethtool_ops: Management operations * @l3mdev_ops: Layer 3 master device operations * @ndisc_ops: Includes callbacks for different IPv6 neighbour @@ -1968,10 +2010,14 @@ enum netdev_ml_priv_type { * * @ml_priv: Mid-layer private * @ml_priv_type: Mid-layer private type - * @lstats: Loopback statistics - * @tstats: Tunnel statistics - * @dstats: Dummy statistics - * @vstats: Virtual ethernet statistics + * + * @pcpu_stat_type: Type of device statistics which the core should + * allocate/free: none, lstats, tstats, dstats. none + * means the driver is handling statistics allocation/ + * freeing internally. + * @lstats: Loopback statistics: packets, bytes + * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes + * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes * * @garp_port: GARP * @mrp_port: MRP @@ -2049,11 +2095,80 @@ enum netdev_ml_priv_type { * SET_NETDEV_DEVLINK_PORT macro. This pointer is static * during the time netdevice is registered. * + * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, + * where the clock is recovered. + * * FIXME: cleanup struct net_device such that network protocol info * moves out. */ struct net_device { + /* Cacheline organization can be found documented in + * Documentation/networking/net_cachelines/net_device.rst. + * Please update the document when adding new fields. + */ + + /* TX read-mostly hotpath */ + __cacheline_group_begin(net_device_read_tx); + unsigned long long priv_flags; + const struct net_device_ops *netdev_ops; + const struct header_ops *header_ops; + struct netdev_queue *_tx; + netdev_features_t gso_partial_features; + unsigned int real_num_tx_queues; + unsigned int gso_max_size; + unsigned int gso_ipv4_max_size; + u16 gso_max_segs; + s16 num_tc; + /* Note : dev->mtu is often read without holding a lock. + * Writers usually hold RTNL. + * It is recommended to use READ_ONCE() to annotate the reads, + * and to use WRITE_ONCE() to annotate the writes. 
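Illustration (not part of the diff): the annotation convention just described, as a pair of minimal accessors (names invented).

static void example_set_mtu(struct net_device *dev, unsigned int new_mtu)
{
    /* writer side: typically holds RTNL, annotate the store */
    WRITE_ONCE(dev->mtu, new_mtu);
}

static unsigned int example_get_mtu(const struct net_device *dev)
{
    /* reader side: no lock held, annotate the load */
    return READ_ONCE(dev->mtu);
}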
+ */ + unsigned int mtu; + unsigned short needed_headroom; + struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; +#ifdef CONFIG_XPS + struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; +#endif +#ifdef CONFIG_NETFILTER_EGRESS + struct nf_hook_entries __rcu *nf_hooks_egress; +#endif +#ifdef CONFIG_NET_XGRESS + struct bpf_mprog_entry __rcu *tcx_egress; +#endif + __cacheline_group_end(net_device_read_tx); + + /* TXRX read-mostly hotpath */ + __cacheline_group_begin(net_device_read_txrx); + unsigned int flags; + unsigned short hard_header_len; + netdev_features_t features; + struct inet6_dev __rcu *ip6_ptr; + __cacheline_group_end(net_device_read_txrx); + + /* RX read-mostly hotpath */ + __cacheline_group_begin(net_device_read_rx); + struct bpf_prog __rcu *xdp_prog; + struct list_head ptype_specific; + int ifindex; + unsigned int real_num_rx_queues; + struct netdev_rx_queue *_rx; + unsigned long gro_flush_timeout; + int napi_defer_hard_irqs; + unsigned int gro_max_size; + unsigned int gro_ipv4_max_size; + rx_handler_func_t __rcu *rx_handler; + void __rcu *rx_handler_data; + possible_net_t nd_net; +#ifdef CONFIG_NETPOLL + struct netpoll_info __rcu *npinfo; +#endif +#ifdef CONFIG_NET_XGRESS + struct bpf_mprog_entry __rcu *tcx_ingress; +#endif + __cacheline_group_end(net_device_read_rx); + char name[IFNAMSIZ]; struct netdev_name_node *name_node; struct dev_ifalias __rcu *ifalias; @@ -2078,7 +2193,6 @@ struct net_device { struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; - struct list_head ptype_specific; struct { struct list_head upper; @@ -2086,31 +2200,18 @@ struct net_device { } adj_list; /* Read-mostly cache-line for fast-path access */ - unsigned int flags; xdp_features_t xdp_features; - unsigned long long priv_flags; - const struct net_device_ops *netdev_ops; const struct xdp_metadata_ops *xdp_metadata_ops; - int ifindex; + const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops; unsigned short gflags; - unsigned short hard_header_len; - /* Note : dev->mtu is often read without holding a lock. - * Writers usually hold RTNL. - * It is recommended to use READ_ONCE() to annotate the reads, - * and to use WRITE_ONCE() to annotate the writes. - */ - unsigned int mtu; - unsigned short needed_headroom; unsigned short needed_tailroom; - netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; - netdev_features_t gso_partial_features; unsigned int min_mtu; unsigned int max_mtu; @@ -2148,8 +2249,6 @@ struct net_device { const struct tlsdev_ops *tlsdev_ops; #endif - const struct header_ops *header_ops; - unsigned char operstate; unsigned char link_mode; @@ -2190,9 +2289,7 @@ struct net_device { /* Protocol-specific pointers */ - struct in_device __rcu *ip_ptr; - struct inet6_dev __rcu *ip6_ptr; #if IS_ENABLED(CONFIG_VLAN_8021Q) struct vlan_info __rcu *vlan_info; #endif @@ -2227,26 +2324,13 @@ struct net_device { /* Interface address info used in eth_type_trans() */ const unsigned char *dev_addr; - struct netdev_rx_queue *_rx; unsigned int num_rx_queues; - unsigned int real_num_rx_queues; - - struct bpf_prog __rcu *xdp_prog; - unsigned long gro_flush_timeout; - int napi_defer_hard_irqs; #define GRO_LEGACY_MAX_SIZE 65536u /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), * and shinfo->gso_segs is a 16bit field. 
*/ #define GRO_MAX_SIZE (8 * 65535u) - unsigned int gro_max_size; - unsigned int gro_ipv4_max_size; unsigned int xdp_zc_max_segs; - rx_handler_func_t __rcu *rx_handler; - void __rcu *rx_handler_data; -#ifdef CONFIG_NET_XGRESS - struct bpf_mprog_entry __rcu *tcx_ingress; -#endif struct netdev_queue __rcu *ingress_queue; #ifdef CONFIG_NETFILTER_INGRESS struct nf_hook_entries __rcu *nf_hooks_ingress; @@ -2261,25 +2345,13 @@ struct net_device { /* * Cache lines mostly used on transmit path */ - struct netdev_queue *_tx ____cacheline_aligned_in_smp; unsigned int num_tx_queues; - unsigned int real_num_tx_queues; struct Qdisc __rcu *qdisc; unsigned int tx_queue_len; spinlock_t tx_global_lock; struct xdp_dev_bulk_queue __percpu *xdp_bulkq; -#ifdef CONFIG_XPS - struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; -#endif -#ifdef CONFIG_NET_XGRESS - struct bpf_mprog_entry __rcu *tcx_egress; -#endif -#ifdef CONFIG_NETFILTER_EGRESS - struct nf_hook_entries __rcu *nf_hooks_egress; -#endif - #ifdef CONFIG_NET_SCHED DECLARE_HASHTABLE (qdisc_hash, 4); #endif @@ -2318,16 +2390,11 @@ struct net_device { bool needs_free_netdev; void (*priv_destructor)(struct net_device *dev); -#ifdef CONFIG_NETPOLL - struct netpoll_info __rcu *npinfo; -#endif - - possible_net_t nd_net; - /* mid-layer private */ void *ml_priv; enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type:8; union { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; @@ -2357,20 +2424,15 @@ struct net_device { */ #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS) - unsigned int gso_max_size; #define TSO_LEGACY_MAX_SIZE 65536 #define TSO_MAX_SIZE UINT_MAX unsigned int tso_max_size; - u16 gso_max_segs; #define TSO_MAX_SEGS U16_MAX u16 tso_max_segs; - unsigned int gso_ipv4_max_size; #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif - s16 num_tc; - struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; u8 prio_tc_map[TC_BITMASK + 1]; #if IS_ENABLED(CONFIG_FCOE) @@ -2405,6 +2467,14 @@ struct net_device { struct rtnl_hw_stats64 *offload_xstats_l3; struct devlink_port *devlink_port; + +#if IS_ENABLED(CONFIG_DPLL) + struct dpll_pin *dpll_pin; +#endif +#if IS_ENABLED(CONFIG_PAGE_POOL) + /** @page_pools: page pools created for this netdevice */ + struct hlist_head page_pools; +#endif }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2609,6 +2679,15 @@ static inline void *netdev_priv(const struct net_device *dev) */ #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) +void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, + enum netdev_queue_type type, + struct napi_struct *napi); + +static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) +{ + napi->irq = irq; +} + /* Default NAPI poll() weight * Device drivers are strongly advised to not use bigger value */ @@ -2725,6 +2804,16 @@ struct pcpu_sw_netstats { struct u64_stats_sync syncp; } __aligned(4 * sizeof(u64)); +struct pcpu_dstats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_drops; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; + struct u64_stats_sync syncp; +} __aligned(8 * sizeof(u64)); + struct pcpu_lstats { u64_stats_t packets; u64_stats_t bytes; @@ -3912,6 +4001,9 @@ int generic_hwtstamp_get_lower(struct net_device *dev, int generic_hwtstamp_set_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg, struct netlink_ext_ack *extack); +int dev_set_hwtstamp_phylib(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); int 
dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, @@ -3940,6 +4032,18 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); int dev_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid, bool recurse); bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); +void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin); +void netdev_dpll_pin_clear(struct net_device *dev); + +static inline struct dpll_pin *netdev_dpll_pin(const struct net_device *dev) +{ +#if IS_ENABLED(CONFIG_DPLL) + return dev->dpll_pin; +#else + return NULL; +#endif +} + struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3980,32 +4084,19 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev, return false; } -struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev); - -static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev) -{ - /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ - struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); - - if (likely(p)) - return p; - - return netdev_core_stats_alloc(dev); -} +void netdev_core_stats_inc(struct net_device *dev, u32 offset); #define DEV_CORE_STATS_INC(FIELD) \ static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ { \ - struct net_device_core_stats __percpu *p; \ - \ - p = dev_core_stats(dev); \ - if (p) \ - this_cpu_inc(p->FIELD); \ + netdev_core_stats_inc(dev, \ + offsetof(struct net_device_core_stats, FIELD)); \ } DEV_CORE_STATS_INC(rx_dropped) DEV_CORE_STATS_INC(tx_dropped) DEV_CORE_STATS_INC(rx_nohandler) DEV_CORE_STATS_INC(rx_otherhost_dropped) +#undef DEV_CORE_STATS_INC static __always_inline int ____dev_forward_skb(struct net_device *dev, struct sk_buff *skb, @@ -4145,6 +4236,15 @@ static inline void netdev_ref_replace(struct net_device *odev, void linkwatch_fire_event(struct net_device *dev); /** + * linkwatch_sync_dev - sync linkwatch for the given device + * @dev: network device to sync linkwatch for + * + * Sync linkwatch for the given device, removing it from the + * pending work list (if queued). 
+ */ +void linkwatch_sync_dev(struct net_device *dev); + +/** * netif_carrier_ok - test if carrier present * @dev: network device * @@ -5214,5 +5314,6 @@ extern struct net_device *blackhole_netdev; #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) #define DEV_STATS_ADD(DEV, FIELD, VAL) \ atomic_long_add((VAL), &(DEV)->stats.__##FIELD) +#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD) #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index d68644b7c299..80900d910992 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -22,6 +22,16 @@ static inline int NF_DROP_GETERR(int verdict) return -(verdict >> NF_VERDICT_QBITS); } +static __always_inline int +NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err) +{ + BUILD_BUG_ON(err > 0xffff); + + kfree_skb_reason(skb, reason); + + return ((err << 16) | NF_STOLEN); +} + static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, const union nf_inet_addr *a2) { diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index e8c350a3ade1..e9f4f845d760 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -186,6 +186,8 @@ struct ip_set_type_variant { /* Return true if "b" set is the same as "a" * according to the create set parameters */ bool (*same_set)(const struct ip_set *a, const struct ip_set *b); + /* Cancel ongoing garbage collectors before destroying the set */ + void (*cancel_gc)(struct ip_set *set); /* Region-locking is used */ bool region_lock; }; @@ -242,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type); /* A generic IP set */ struct ip_set { + /* For call_rcu() in destroy */ + struct rcu_head rcu; /* The name of the set */ char name[IPSET_MAXNAMELEN]; /* Lock protecting the set data */ diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 625f491b95de..fb31312825ae 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -9,6 +9,7 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; + u8 init[IP_CT_DIR_MAX]; u8 last_dir; u8 flags; }; diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index f980edfdd278..743475ca7e9d 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -42,7 +42,7 @@ static inline int nf_bridge_get_physinif(const struct sk_buff *skb) if (!nf_bridge) return 0; - return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0; + return nf_bridge->physinif; } static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) @@ -56,11 +56,11 @@ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) } static inline struct net_device * -nf_bridge_get_physindev(const struct sk_buff *skb) +nf_bridge_get_physindev(const struct sk_buff *skb, struct net *net) { const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); - return nf_bridge ? nf_bridge->physindev : NULL; + return nf_bridge ?
dev_get_by_index_rcu(net, nf_bridge->physinif) : NULL; } static inline struct net_device * diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 7834c0be2831..61aa48f46dd7 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -51,7 +51,7 @@ struct nf_ipv6_ops { u32 (*cookie_init_sequence)(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); int (*cookie_v6_check)(const struct ipv6hdr *iph, - const struct tcphdr *th, __u32 cookie); + const struct tcphdr *th); #endif void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, @@ -179,16 +179,16 @@ static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph, } static inline int nf_cookie_v6_check(const struct ipv6hdr *iph, - const struct tcphdr *th, __u32 cookie) + const struct tcphdr *th) { #if IS_ENABLED(CONFIG_SYN_COOKIES) #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (v6_ops) - return v6_ops->cookie_v6_check(iph, th, cookie); + return v6_ops->cookie_v6_check(iph, th); #elif IS_BUILTIN(CONFIG_IPV6) - return __cookie_v6_check(iph, th, cookie); + return __cookie_v6_check(iph, th); #endif #endif return 0; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index b11a84f6c32b..100cbb261269 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -109,11 +109,18 @@ static inline int wait_on_page_fscache_killable(struct page *page) return folio_wait_private_2_killable(page_folio(page)); } +/* Marks used on xarray-based buffers */ +#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */ +#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */ + enum netfs_io_source { NETFS_FILL_WITH_ZEROES, NETFS_DOWNLOAD_FROM_SERVER, NETFS_READ_FROM_CACHE, NETFS_INVALID_READ, + NETFS_UPLOAD_TO_SERVER, + NETFS_WRITE_TO_CACHE, + NETFS_INVALID_WRITE, } __mode(byte); typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error, @@ -129,9 +136,57 @@ struct netfs_inode { struct fscache_cookie *cache; #endif loff_t remote_i_size; /* Size of the remote file */ + loff_t zero_point; /* Size after which we assume there's no data + * on the server */ + unsigned long flags; +#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */ +#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ +#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ +#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */ +}; + +/* + * A netfs group - for instance a ceph snap. This is marked on dirty pages and + * pages marked with a group must be flushed before they can be written under + * the domain of another group. + */ +struct netfs_group { + refcount_t ref; + void (*free)(struct netfs_group *netfs_group); }; /* + * Information about a dirty page (attached only if necessary). + * folio->private + */ +struct netfs_folio { + struct netfs_group *netfs_group; /* Filesystem's grouping marker (or NULL). */ + unsigned int dirty_offset; /* Write-streaming dirty data offset */ + unsigned int dirty_len; /* Write-streaming dirty data length */ +}; +#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. 
*/ + +static inline struct netfs_folio *netfs_folio_info(struct folio *folio) +{ + void *priv = folio_get_private(folio); + + if ((unsigned long)priv & NETFS_FOLIO_INFO) + return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO); + return NULL; +} + +static inline struct netfs_group *netfs_folio_group(struct folio *folio) +{ + struct netfs_folio *finfo; + void *priv = folio_get_private(folio); + + finfo = netfs_folio_info(folio); + if (finfo) + return finfo->netfs_group; + return priv; +} + +/* * Resources required to do operations on a cache. */ struct netfs_cache_resources { @@ -143,17 +198,24 @@ struct netfs_cache_resources { }; /* - * Descriptor for a single component subrequest. + * Descriptor for a single component subrequest. Each operation represents an + * individual read/write from/to a server, a cache, a journal, etc.. + * + * The buffer iterator is persistent for the life of the subrequest struct and + * the pages it points to can be relied on to exist for the duration. */ struct netfs_io_subrequest { struct netfs_io_request *rreq; /* Supervising I/O request */ + struct work_struct work; struct list_head rreq_link; /* Link in rreq->subrequests */ + struct iov_iter io_iter; /* Iterator for this subrequest */ loff_t start; /* Where to start the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ refcount_t ref; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ + unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */ enum netfs_io_source source; /* Where to read from/write to */ unsigned long flags; #define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ @@ -168,6 +230,13 @@ enum netfs_io_origin { NETFS_READAHEAD, /* This read was triggered by readahead */ NETFS_READPAGE, /* This read is a synchronous read */ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ + NETFS_WRITEBACK, /* This write was triggered by writepages */ + NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */ + NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */ + NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */ + NETFS_DIO_READ, /* This is a direct I/O read */ + NETFS_DIO_WRITE, /* This is a direct I/O write */ + nr__netfs_io_origin } __mode(byte); /* @@ -175,19 +244,34 @@ enum netfs_io_origin { * operations to a variety of data stores and then stitch the result together. 
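Illustration (not part of the diff): how the NETFS_FOLIO_INFO bit tags folio->private so netfs_folio_info() above can tell a struct netfs_folio from a bare group pointer. The attach helper below is a hypothetical sketch.

/* Sketch: attach write-streaming state to a folio, tagging the
 * pointer so netfs_folio_info() recognises it later. */
static int example_attach_finfo(struct folio *folio, unsigned int offset,
                                unsigned int len)
{
    struct netfs_folio *finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);

    if (!finfo)
        return -ENOMEM;
    finfo->dirty_offset = offset;
    finfo->dirty_len = len;
    folio_attach_private(folio,
                         (void *)((unsigned long)finfo | NETFS_FOLIO_INFO));
    return 0;
}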
*/ struct netfs_io_request { - struct work_struct work; + union { + struct work_struct work; + struct rcu_head rcu; + }; struct inode *inode; /* The file being accessed */ struct address_space *mapping; /* The mapping being accessed */ + struct kiocb *iocb; /* AIO completion vector */ struct netfs_cache_resources cache_resources; + struct list_head proc_link; /* Link in netfs_iorequests */ struct list_head subrequests; /* Contributory I/O operations */ + struct iov_iter iter; /* Unencrypted-side iterator */ + struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */ void *netfs_priv; /* Private data for the netfs */ + struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */ + unsigned int direct_bv_count; /* Number of elements in direct_bv[] */ unsigned int debug_id; + unsigned int rsize; /* Maximum read size (0 for none) */ + unsigned int wsize; /* Maximum write size (0 for none) */ + unsigned int subreq_counter; /* Next subreq->debug_index */ atomic_t nr_outstanding; /* Number of ops in progress */ atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */ size_t submitted; /* Amount submitted for I/O so far */ size_t len; /* Length of the request */ + size_t upper_len; /* Length can be extended to here */ + size_t transferred; /* Amount to be indicated as transferred */ short error; /* 0 or error that occurred */ enum netfs_io_origin origin; /* Origin of the request */ + bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */ loff_t i_size; /* Size of the file */ loff_t start; /* Start position */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ @@ -199,17 +283,25 @@ struct netfs_io_request { #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ #define NETFS_RREQ_FAILED 4 /* The request failed */ #define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */ +#define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */ +#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ +#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */ +#define NETFS_RREQ_BLOCKED 10 /* We blocked */ const struct netfs_request_ops *netfs_ops; + void (*cleanup)(struct netfs_io_request *req); }; /* * Operations the network filesystem can/must provide to the helpers. */ struct netfs_request_ops { + unsigned int io_request_size; /* Alloc size for netfs_io_request struct */ + unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */ int (*init_request)(struct netfs_io_request *rreq, struct file *file); void (*free_request)(struct netfs_io_request *rreq); - int (*begin_cache_operation)(struct netfs_io_request *rreq); + void (*free_subrequest)(struct netfs_io_subrequest *rreq); + /* Read request handling */ void (*expand_readahead)(struct netfs_io_request *rreq); bool (*clamp_length)(struct netfs_io_subrequest *subreq); void (*issue_read)(struct netfs_io_subrequest *subreq); @@ -217,6 +309,14 @@ struct netfs_request_ops { int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, struct folio **foliop, void **_fsdata); void (*done)(struct netfs_io_request *rreq); + + /* Modification handling */ + void (*update_i_size)(struct inode *inode, loff_t i_size); + + /* Write request handling */ + void (*create_write_requests)(struct netfs_io_request *wreq, + loff_t start, size_t len); + void (*invalidate_cache)(struct netfs_io_request *wreq); }; /* @@ -229,8 +329,7 @@ enum netfs_read_from_hole { }; /* - * Table of operations for access to a cache. 
This is obtained by - * rreq->ops->begin_cache_operation(). + * Table of operations for access to a cache. */ struct netfs_cache_ops { /* End an operation */ @@ -265,8 +364,8 @@ struct netfs_cache_ops { * actually do. */ int (*prepare_write)(struct netfs_cache_resources *cres, - loff_t *_start, size_t *_len, loff_t i_size, - bool no_space_allocated_yet); + loff_t *_start, size_t *_len, size_t upper_len, + loff_t i_size, bool no_space_allocated_yet); /* Prepare an on-demand read operation, shortening it to a cached/uncached * boundary as appropriate. @@ -284,22 +383,62 @@ struct netfs_cache_ops { loff_t *_data_start, size_t *_data_len); }; +/* High-level read API. */ +ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter); +ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter); +ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); + +/* High-level write API */ +ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, + struct netfs_group *netfs_group); +ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from, + struct netfs_group *netfs_group); +ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from); +ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from); + +/* Address operations API */ struct readahead_control; void netfs_readahead(struct readahead_control *); int netfs_read_folio(struct file *, struct folio *); int netfs_write_begin(struct netfs_inode *, struct file *, - struct address_space *, loff_t pos, unsigned int len, - struct folio **, void **fsdata); - + struct address_space *, loff_t pos, unsigned int len, + struct folio **, void **fsdata); +int netfs_writepages(struct address_space *mapping, + struct writeback_control *wbc); +bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio); +int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc); +void netfs_clear_inode_writeback(struct inode *inode, const void *aux); +void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length); +bool netfs_release_folio(struct folio *folio, gfp_t gfp); +int netfs_launder_folio(struct folio *folio); + +/* VMA operations API. */ +vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group); + +/* (Sub)request management API. 
*/ void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); void netfs_get_subrequest(struct netfs_io_subrequest *subreq, enum netfs_sreq_ref_trace what); void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async, enum netfs_sreq_ref_trace what); -void netfs_stats_show(struct seq_file *); ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len, struct iov_iter *new, iov_iter_extraction_t extraction_flags); +size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset, + size_t max_size, size_t max_segs); +struct netfs_io_subrequest *netfs_create_write_request( + struct netfs_io_request *wreq, enum netfs_io_source dest, + loff_t start, size_t len, work_func_t worker); +void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, + bool was_async); +void netfs_queue_write_request(struct netfs_io_subrequest *subreq); + +int netfs_start_io_read(struct inode *inode); +void netfs_end_io_read(struct inode *inode); +int netfs_start_io_write(struct inode *inode); +void netfs_end_io_write(struct inode *inode); +int netfs_start_io_direct(struct inode *inode); +void netfs_end_io_direct(struct inode *inode); /** * netfs_inode - Get the netfs inode context from the inode @@ -317,30 +456,44 @@ static inline struct netfs_inode *netfs_inode(struct inode *inode) * netfs_inode_init - Initialise a netfslib inode context * @ctx: The netfs inode to initialise * @ops: The netfs's operations list + * @use_zero_point: True to use the zero_point read optimisation * * Initialise the netfs library context struct. This is expected to follow on * directly from the VFS inode struct. */ static inline void netfs_inode_init(struct netfs_inode *ctx, - const struct netfs_request_ops *ops) + const struct netfs_request_ops *ops, + bool use_zero_point) { ctx->ops = ops; ctx->remote_i_size = i_size_read(&ctx->inode); + ctx->zero_point = LLONG_MAX; + ctx->flags = 0; #if IS_ENABLED(CONFIG_FSCACHE) ctx->cache = NULL; #endif + /* ->releasepage() drives zero_point */ + if (use_zero_point) { + ctx->zero_point = ctx->remote_i_size; + mapping_set_release_always(ctx->inode.i_mapping); + } } /** * netfs_resize_file - Note that a file got resized * @ctx: The netfs inode being resized * @new_i_size: The new file size + * @changed_on_server: The change was applied to the server * * Inform the netfs lib that a file got resized so that it can adjust its state. 
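Illustration (not part of the diff): a hypothetical caller of the updated resize helper that follows, invoked after a server-acknowledged truncate.

/* Hypothetical setattr fragment: shrink the pagecache, then tell
 * netfslib the new size is authoritative on the server too. */
static void example_post_truncate(struct inode *inode, loff_t new_size)
{
    truncate_setsize(inode, new_size);
    netfs_resize_file(netfs_inode(inode), new_size, true);
}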
*/ -static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size) +static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size, + bool changed_on_server) { - ctx->remote_i_size = new_i_size; + if (changed_on_server) + ctx->remote_i_size = new_i_size; + if (new_i_size < ctx->zero_point) + ctx->zero_point = new_i_size; } /** diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 75d7de34c908..1a4445bf2ab9 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -228,10 +228,12 @@ bool netlink_strict_get_check(struct sk_buff *skb); int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation); + +typedef int (*netlink_filter_fn)(struct sock *dsk, struct sk_buff *skb, void *data); + int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation, - int (*filter)(struct sock *dsk, - struct sk_buff *skb, void *data), + netlink_filter_fn filter, void *filter_data); int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); int netlink_register_notifier(struct notifier_block *nb); @@ -351,5 +353,6 @@ bool netlink_ns_capable(const struct sk_buff *skb, struct user_namespace *ns, int cap); bool netlink_capable(const struct sk_buff *skb, int cap); bool netlink_net_capable(const struct sk_buff *skb, int cap); +struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast); #endif /* __LINUX_NETLINK_H */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 730003c4f4af..ef8d2d618d5b 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -150,7 +150,7 @@ enum nfs_opnum4 { OP_WRITE_SAME = 70, OP_CLONE = 71, - /* xattr support (RFC8726) */ + /* xattr support (RFC8276) */ OP_GETXATTR = 72, OP_SETXATTR = 73, OP_LISTXATTRS = 74, @@ -389,79 +389,203 @@ enum lock_type4 { NFS4_WRITEW_LT = 4 }; +/* + * Symbol names and values are from RFC 7531 Section 2. 
+ * "XDR Description of NFSv4.0" + */ +enum { + FATTR4_SUPPORTED_ATTRS = 0, + FATTR4_TYPE = 1, + FATTR4_FH_EXPIRE_TYPE = 2, + FATTR4_CHANGE = 3, + FATTR4_SIZE = 4, + FATTR4_LINK_SUPPORT = 5, + FATTR4_SYMLINK_SUPPORT = 6, + FATTR4_NAMED_ATTR = 7, + FATTR4_FSID = 8, + FATTR4_UNIQUE_HANDLES = 9, + FATTR4_LEASE_TIME = 10, + FATTR4_RDATTR_ERROR = 11, + FATTR4_ACL = 12, + FATTR4_ACLSUPPORT = 13, + FATTR4_ARCHIVE = 14, + FATTR4_CANSETTIME = 15, + FATTR4_CASE_INSENSITIVE = 16, + FATTR4_CASE_PRESERVING = 17, + FATTR4_CHOWN_RESTRICTED = 18, + FATTR4_FILEHANDLE = 19, + FATTR4_FILEID = 20, + FATTR4_FILES_AVAIL = 21, + FATTR4_FILES_FREE = 22, + FATTR4_FILES_TOTAL = 23, + FATTR4_FS_LOCATIONS = 24, + FATTR4_HIDDEN = 25, + FATTR4_HOMOGENEOUS = 26, + FATTR4_MAXFILESIZE = 27, + FATTR4_MAXLINK = 28, + FATTR4_MAXNAME = 29, + FATTR4_MAXREAD = 30, + FATTR4_MAXWRITE = 31, + FATTR4_MIMETYPE = 32, + FATTR4_MODE = 33, + FATTR4_NO_TRUNC = 34, + FATTR4_NUMLINKS = 35, + FATTR4_OWNER = 36, + FATTR4_OWNER_GROUP = 37, + FATTR4_QUOTA_AVAIL_HARD = 38, + FATTR4_QUOTA_AVAIL_SOFT = 39, + FATTR4_QUOTA_USED = 40, + FATTR4_RAWDEV = 41, + FATTR4_SPACE_AVAIL = 42, + FATTR4_SPACE_FREE = 43, + FATTR4_SPACE_TOTAL = 44, + FATTR4_SPACE_USED = 45, + FATTR4_SYSTEM = 46, + FATTR4_TIME_ACCESS = 47, + FATTR4_TIME_ACCESS_SET = 48, + FATTR4_TIME_BACKUP = 49, + FATTR4_TIME_CREATE = 50, + FATTR4_TIME_DELTA = 51, + FATTR4_TIME_METADATA = 52, + FATTR4_TIME_MODIFY = 53, + FATTR4_TIME_MODIFY_SET = 54, + FATTR4_MOUNTED_ON_FILEID = 55, +}; + +/* + * Symbol names and values are from RFC 5662 Section 2. + * "XDR Description of NFSv4.1" + */ +enum { + FATTR4_DIR_NOTIF_DELAY = 56, + FATTR4_DIRENT_NOTIF_DELAY = 57, + FATTR4_DACL = 58, + FATTR4_SACL = 59, + FATTR4_CHANGE_POLICY = 60, + FATTR4_FS_STATUS = 61, + FATTR4_FS_LAYOUT_TYPES = 62, + FATTR4_LAYOUT_HINT = 63, + FATTR4_LAYOUT_TYPES = 64, + FATTR4_LAYOUT_BLKSIZE = 65, + FATTR4_LAYOUT_ALIGNMENT = 66, + FATTR4_FS_LOCATIONS_INFO = 67, + FATTR4_MDSTHRESHOLD = 68, + FATTR4_RETENTION_GET = 69, + FATTR4_RETENTION_SET = 70, + FATTR4_RETENTEVT_GET = 71, + FATTR4_RETENTEVT_SET = 72, + FATTR4_RETENTION_HOLD = 73, + FATTR4_MODE_SET_MASKED = 74, + FATTR4_SUPPATTR_EXCLCREAT = 75, + FATTR4_FS_CHARSET_CAP = 76, +}; + +/* + * Symbol names and values are from RFC 7863 Section 2. + * "XDR Description of NFSv4.2" + */ +enum { + FATTR4_CLONE_BLKSIZE = 77, + FATTR4_SPACE_FREED = 78, + FATTR4_CHANGE_ATTR_TYPE = 79, + FATTR4_SEC_LABEL = 80, +}; + +/* + * Symbol names and values are from RFC 8275 Section 5. + * "The mode_umask Attribute" + */ +enum { + FATTR4_MODE_UMASK = 81, +}; + +/* + * Symbol names and values are from RFC 8276 Section 8.6. + * "Numeric Values Assigned to Protocol Extensions" + */ +enum { + FATTR4_XATTR_SUPPORT = 82, +}; + +/* + * The following internal definitions enable processing the above + * attribute bits within 32-bit word boundaries. 
+ */ /* Mandatory Attributes */ -#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0) -#define FATTR4_WORD0_TYPE (1UL << 1) -#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2) -#define FATTR4_WORD0_CHANGE (1UL << 3) -#define FATTR4_WORD0_SIZE (1UL << 4) -#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5) -#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6) -#define FATTR4_WORD0_NAMED_ATTR (1UL << 7) -#define FATTR4_WORD0_FSID (1UL << 8) -#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) -#define FATTR4_WORD0_LEASE_TIME (1UL << 10) -#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) +#define FATTR4_WORD0_SUPPORTED_ATTRS BIT(FATTR4_SUPPORTED_ATTRS) +#define FATTR4_WORD0_TYPE BIT(FATTR4_TYPE) +#define FATTR4_WORD0_FH_EXPIRE_TYPE BIT(FATTR4_FH_EXPIRE_TYPE) +#define FATTR4_WORD0_CHANGE BIT(FATTR4_CHANGE) +#define FATTR4_WORD0_SIZE BIT(FATTR4_SIZE) +#define FATTR4_WORD0_LINK_SUPPORT BIT(FATTR4_LINK_SUPPORT) +#define FATTR4_WORD0_SYMLINK_SUPPORT BIT(FATTR4_SYMLINK_SUPPORT) +#define FATTR4_WORD0_NAMED_ATTR BIT(FATTR4_NAMED_ATTR) +#define FATTR4_WORD0_FSID BIT(FATTR4_FSID) +#define FATTR4_WORD0_UNIQUE_HANDLES BIT(FATTR4_UNIQUE_HANDLES) +#define FATTR4_WORD0_LEASE_TIME BIT(FATTR4_LEASE_TIME) +#define FATTR4_WORD0_RDATTR_ERROR BIT(FATTR4_RDATTR_ERROR) /* Mandatory in NFSv4.1 */ -#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11) +#define FATTR4_WORD2_SUPPATTR_EXCLCREAT BIT(FATTR4_SUPPATTR_EXCLCREAT - 64) /* Recommended Attributes */ -#define FATTR4_WORD0_ACL (1UL << 12) -#define FATTR4_WORD0_ACLSUPPORT (1UL << 13) -#define FATTR4_WORD0_ARCHIVE (1UL << 14) -#define FATTR4_WORD0_CANSETTIME (1UL << 15) -#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16) -#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17) -#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18) -#define FATTR4_WORD0_FILEHANDLE (1UL << 19) -#define FATTR4_WORD0_FILEID (1UL << 20) -#define FATTR4_WORD0_FILES_AVAIL (1UL << 21) -#define FATTR4_WORD0_FILES_FREE (1UL << 22) -#define FATTR4_WORD0_FILES_TOTAL (1UL << 23) -#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24) -#define FATTR4_WORD0_HIDDEN (1UL << 25) -#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26) -#define FATTR4_WORD0_MAXFILESIZE (1UL << 27) -#define FATTR4_WORD0_MAXLINK (1UL << 28) -#define FATTR4_WORD0_MAXNAME (1UL << 29) -#define FATTR4_WORD0_MAXREAD (1UL << 30) -#define FATTR4_WORD0_MAXWRITE (1UL << 31) -#define FATTR4_WORD1_MIMETYPE (1UL << 0) -#define FATTR4_WORD1_MODE (1UL << 1) -#define FATTR4_WORD1_NO_TRUNC (1UL << 2) -#define FATTR4_WORD1_NUMLINKS (1UL << 3) -#define FATTR4_WORD1_OWNER (1UL << 4) -#define FATTR4_WORD1_OWNER_GROUP (1UL << 5) -#define FATTR4_WORD1_QUOTA_HARD (1UL << 6) -#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7) -#define FATTR4_WORD1_QUOTA_USED (1UL << 8) -#define FATTR4_WORD1_RAWDEV (1UL << 9) -#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10) -#define FATTR4_WORD1_SPACE_FREE (1UL << 11) -#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12) -#define FATTR4_WORD1_SPACE_USED (1UL << 13) -#define FATTR4_WORD1_SYSTEM (1UL << 14) -#define FATTR4_WORD1_TIME_ACCESS (1UL << 15) -#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16) -#define FATTR4_WORD1_TIME_BACKUP (1UL << 17) -#define FATTR4_WORD1_TIME_CREATE (1UL << 18) -#define FATTR4_WORD1_TIME_DELTA (1UL << 19) -#define FATTR4_WORD1_TIME_METADATA (1UL << 20) -#define FATTR4_WORD1_TIME_MODIFY (1UL << 21) -#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22) -#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) -#define FATTR4_WORD1_DACL (1UL << 26) -#define FATTR4_WORD1_SACL (1UL << 27) -#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) -#define FATTR4_WORD2_LAYOUT_TYPES (1UL 
<< 0) -#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) -#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) -#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) -#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) -#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) -#define FATTR4_WORD2_MODE_UMASK (1UL << 17) -#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) +#define FATTR4_WORD0_ACL BIT(FATTR4_ACL) +#define FATTR4_WORD0_ACLSUPPORT BIT(FATTR4_ACLSUPPORT) +#define FATTR4_WORD0_ARCHIVE BIT(FATTR4_ARCHIVE) +#define FATTR4_WORD0_CANSETTIME BIT(FATTR4_CANSETTIME) +#define FATTR4_WORD0_CASE_INSENSITIVE BIT(FATTR4_CASE_INSENSITIVE) +#define FATTR4_WORD0_CASE_PRESERVING BIT(FATTR4_CASE_PRESERVING) +#define FATTR4_WORD0_CHOWN_RESTRICTED BIT(FATTR4_CHOWN_RESTRICTED) +#define FATTR4_WORD0_FILEHANDLE BIT(FATTR4_FILEHANDLE) +#define FATTR4_WORD0_FILEID BIT(FATTR4_FILEID) +#define FATTR4_WORD0_FILES_AVAIL BIT(FATTR4_FILES_AVAIL) +#define FATTR4_WORD0_FILES_FREE BIT(FATTR4_FILES_FREE) +#define FATTR4_WORD0_FILES_TOTAL BIT(FATTR4_FILES_TOTAL) +#define FATTR4_WORD0_FS_LOCATIONS BIT(FATTR4_FS_LOCATIONS) +#define FATTR4_WORD0_HIDDEN BIT(FATTR4_HIDDEN) +#define FATTR4_WORD0_HOMOGENEOUS BIT(FATTR4_HOMOGENEOUS) +#define FATTR4_WORD0_MAXFILESIZE BIT(FATTR4_MAXFILESIZE) +#define FATTR4_WORD0_MAXLINK BIT(FATTR4_MAXLINK) +#define FATTR4_WORD0_MAXNAME BIT(FATTR4_MAXNAME) +#define FATTR4_WORD0_MAXREAD BIT(FATTR4_MAXREAD) +#define FATTR4_WORD0_MAXWRITE BIT(FATTR4_MAXWRITE) + +#define FATTR4_WORD1_MIMETYPE BIT(FATTR4_MIMETYPE - 32) +#define FATTR4_WORD1_MODE BIT(FATTR4_MODE - 32) +#define FATTR4_WORD1_NO_TRUNC BIT(FATTR4_NO_TRUNC - 32) +#define FATTR4_WORD1_NUMLINKS BIT(FATTR4_NUMLINKS - 32) +#define FATTR4_WORD1_OWNER BIT(FATTR4_OWNER - 32) +#define FATTR4_WORD1_OWNER_GROUP BIT(FATTR4_OWNER_GROUP - 32) +#define FATTR4_WORD1_QUOTA_HARD BIT(FATTR4_QUOTA_AVAIL_HARD - 32) +#define FATTR4_WORD1_QUOTA_SOFT BIT(FATTR4_QUOTA_AVAIL_SOFT - 32) +#define FATTR4_WORD1_QUOTA_USED BIT(FATTR4_QUOTA_USED - 32) +#define FATTR4_WORD1_RAWDEV BIT(FATTR4_RAWDEV - 32) +#define FATTR4_WORD1_SPACE_AVAIL BIT(FATTR4_SPACE_AVAIL - 32) +#define FATTR4_WORD1_SPACE_FREE BIT(FATTR4_SPACE_FREE - 32) +#define FATTR4_WORD1_SPACE_TOTAL BIT(FATTR4_SPACE_TOTAL - 32) +#define FATTR4_WORD1_SPACE_USED BIT(FATTR4_SPACE_USED - 32) +#define FATTR4_WORD1_SYSTEM BIT(FATTR4_SYSTEM - 32) +#define FATTR4_WORD1_TIME_ACCESS BIT(FATTR4_TIME_ACCESS - 32) +#define FATTR4_WORD1_TIME_ACCESS_SET BIT(FATTR4_TIME_ACCESS_SET - 32) +#define FATTR4_WORD1_TIME_BACKUP BIT(FATTR4_TIME_BACKUP - 32) +#define FATTR4_WORD1_TIME_CREATE BIT(FATTR4_TIME_CREATE - 32) +#define FATTR4_WORD1_TIME_DELTA BIT(FATTR4_TIME_DELTA - 32) +#define FATTR4_WORD1_TIME_METADATA BIT(FATTR4_TIME_METADATA - 32) +#define FATTR4_WORD1_TIME_MODIFY BIT(FATTR4_TIME_MODIFY - 32) +#define FATTR4_WORD1_TIME_MODIFY_SET BIT(FATTR4_TIME_MODIFY_SET - 32) +#define FATTR4_WORD1_MOUNTED_ON_FILEID BIT(FATTR4_MOUNTED_ON_FILEID - 32) +#define FATTR4_WORD1_DACL BIT(FATTR4_DACL - 32) +#define FATTR4_WORD1_SACL BIT(FATTR4_SACL - 32) +#define FATTR4_WORD1_FS_LAYOUT_TYPES BIT(FATTR4_FS_LAYOUT_TYPES - 32) + +#define FATTR4_WORD2_LAYOUT_TYPES BIT(FATTR4_LAYOUT_TYPES - 64) +#define FATTR4_WORD2_LAYOUT_BLKSIZE BIT(FATTR4_LAYOUT_BLKSIZE - 64) +#define FATTR4_WORD2_MDSTHRESHOLD BIT(FATTR4_MDSTHRESHOLD - 64) +#define FATTR4_WORD2_CLONE_BLKSIZE BIT(FATTR4_CLONE_BLKSIZE - 64) +#define FATTR4_WORD2_CHANGE_ATTR_TYPE BIT(FATTR4_CHANGE_ATTR_TYPE - 64) +#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64) +#define FATTR4_WORD2_MODE_UMASK 
BIT(FATTR4_MODE_UMASK - 64) +#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) @@ -745,4 +869,26 @@ enum { RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15, }; +enum nfs_cb_opnum4 { + OP_CB_GETATTR = 3, + OP_CB_RECALL = 4, + + /* Callback operations new to NFSv4.1 */ + OP_CB_LAYOUTRECALL = 5, + OP_CB_NOTIFY = 6, + OP_CB_PUSH_DELEG = 7, + OP_CB_RECALL_ANY = 8, + OP_CB_RECALLABLE_OBJ_AVAIL = 9, + OP_CB_RECALL_SLOT = 10, + OP_CB_SEQUENCE = 11, + OP_CB_WANTS_CANCELLED = 12, + OP_CB_NOTIFY_LOCK = 13, + OP_CB_NOTIFY_DEVICEID = 14, + + /* Callback operations new to NFSv4.2 */ + OP_CB_OFFLOAD = 15, + + OP_CB_ILLEGAL = 10044, +}; + #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 279262057a92..f5ce7b101146 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -595,7 +595,6 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); * linux/fs/nfs/write.c */ extern int nfs_congestion_kb; -extern int nfs_writepage(struct page *page, struct writeback_control *wbc); extern int nfs_writepages(struct address_space *, struct writeback_control *); extern int nfs_flush_incompatible(struct file *file, struct folio *folio); extern int nfs_update_folio(struct file *file, struct folio *folio, diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 20eeba8b009d..cd797e00fe35 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -48,6 +48,7 @@ struct nfs_client { #define NFS_CS_NOPING 6 /* - don't ping on connect */ #define NFS_CS_DS 7 /* - Server is a DS */ #define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */ +#define NFS_CS_PNFS 9 /* - Server used for pnfs */ struct sockaddr_storage cl_addr; /* server identifier */ size_t cl_addrlen; char * cl_hostname; /* hostname of server */ @@ -238,6 +239,7 @@ struct nfs_server { struct list_head delegations; struct list_head ss_copies; + unsigned long delegation_gen; unsigned long mig_gen; unsigned long mig_status; #define NFS_MIG_IN_TRANSITION (1) diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index aa9f4c6ebe26..1c315f854ea8 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -157,7 +157,9 @@ extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); extern int nfs_page_group_lock_subrequests(struct nfs_page *head); -extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode); +extern void nfs_join_page_group(struct nfs_page *head, + struct nfs_commit_info *cinfo, + struct inode *inode); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 12bbb5c63664..539b57fbf3ce 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1772,7 +1772,7 @@ struct nfs_rpc_ops { void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); int (*link) (struct inode *, struct inode *, const struct qstr *); - int (*symlink) (struct inode *, struct dentry *, struct page *, + int (*symlink) (struct inode *, struct dentry *, struct folio *, unsigned int, struct iattr *); int (*mkdir) (struct inode *, struct dentry *, struct iattr *); int 
(*rmdir) (struct inode *, const struct qstr *); diff --git a/include/linux/node.h b/include/linux/node.h index 427a5975cf40..25b66d705ee2 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -20,14 +20,14 @@ #include <linux/list.h> /** - * struct node_hmem_attrs - heterogeneous memory performance attributes + * struct access_coordinate - generic performance coordinates container * * @read_bandwidth: Read bandwidth in MB/s * @write_bandwidth: Write bandwidth in MB/s * @read_latency: Read latency in nanoseconds * @write_latency: Write latency in nanoseconds */ -struct node_hmem_attrs { +struct access_coordinate { unsigned int read_bandwidth; unsigned int write_bandwidth; unsigned int read_latency; @@ -65,7 +65,7 @@ struct node_cache_attrs { #ifdef CONFIG_HMEM_REPORTING void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs); -void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, +void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord, unsigned access); #else static inline void node_add_cache(unsigned int nid, @@ -74,7 +74,7 @@ static inline void node_add_cache(unsigned int nid, } static inline void node_set_perf_attrs(unsigned int nid, - struct node_hmem_attrs *hmem_attrs, + struct access_coordinate *coord, unsigned access) { } diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 8d07116caaf1..b61438313a73 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -93,10 +93,10 @@ #include <linux/threads.h> #include <linux/bitmap.h> #include <linux/minmax.h> +#include <linux/nodemask_types.h> #include <linux/numa.h> #include <linux/random.h> -typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; extern nodemask_t _unused_nodemask_arg_; /** diff --git a/include/linux/nodemask_types.h b/include/linux/nodemask_types.h new file mode 100644 index 000000000000..6b28d97ea6ed --- /dev/null +++ b/include/linux/nodemask_types.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_NODEMASK_TYPES_H +#define __LINUX_NODEMASK_TYPES_H + +#include <linux/bitops.h> +#include <linux/numa.h> + +typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; + +#endif /* __LINUX_NODEMASK_TYPES_H */ diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 771cb0285872..5601d14e2886 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -2,6 +2,7 @@ #ifndef _LINUX_NSPROXY_H #define _LINUX_NSPROXY_H +#include <linux/refcount.h> #include <linux/spinlock.h> #include <linux/sched.h> diff --git a/include/linux/nubus.h b/include/linux/nubus.h index bdcd85e622d8..4d103ac8f5c7 100644 --- a/include/linux/nubus.h +++ b/include/linux/nubus.h @@ -89,8 +89,6 @@ struct nubus_driver { void (*remove)(struct nubus_board *board); }; -extern struct bus_type nubus_bus_type; - /* Generic NuBus interface functions, modelled after the PCI interface */ #ifdef CONFIG_PROC_FS extern bool nubus_populate_procfs; diff --git a/include/linux/numa.h b/include/linux/numa.h index 59df211d051f..915033a75731 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NUMA_H #define _LINUX_NUMA_H +#include <linux/init.h> #include <linux/types.h> #ifdef CONFIG_NODES_SHIFT @@ -12,6 +13,7 @@ #define MAX_NUMNODES (1 << NODES_SHIFT) #define NUMA_NO_NODE (-1) +#define NUMA_NO_MEMBLK (-1) /* optionally keep NUMA memory info available post init */ #ifdef CONFIG_NUMA_KEEP_MEMINFO @@ -21,33 +23,32 @@ #endif #ifdef 
CONFIG_NUMA -#include <linux/printk.h> #include <asm/sparsemem.h> /* Generic implementation available */ -int numa_map_to_online_node(int node); +int numa_nearest_node(int node, unsigned int state); #ifndef memory_add_physaddr_to_nid -static inline int memory_add_physaddr_to_nid(u64 start) -{ - pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n", - start); - return 0; -} +int memory_add_physaddr_to_nid(u64 start); #endif + #ifndef phys_to_target_node -static inline int phys_to_target_node(u64 start) +int phys_to_target_node(u64 start); +#endif + +#ifndef numa_fill_memblks +static inline int __init numa_fill_memblks(u64 start, u64 end) { - pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n", - start); - return 0; + return NUMA_NO_MEMBLK; } #endif + #else /* !CONFIG_NUMA */ -static inline int numa_map_to_online_node(int node) +static inline int numa_nearest_node(int node, unsigned int state) { return NUMA_NO_NODE; } + static inline int memory_add_physaddr_to_nid(u64 start) { return 0; @@ -58,6 +59,8 @@ static inline int phys_to_target_node(u64 start) } #endif +#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE) + #ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP extern const struct attribute_group arch_node_dev_group; #endif diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h index dcb8030062dd..c1d0bc5d9624 100644 --- a/include/linux/nvme-auth.h +++ b/include/linux/nvme-auth.h @@ -9,9 +9,9 @@ #include <crypto/kpp.h> struct nvme_dhchap_key { - u8 *key; size_t len; u8 hash; + u8 key[]; }; u32 nvme_auth_get_seqnum(void); @@ -24,10 +24,13 @@ const char *nvme_auth_digest_name(u8 hmac_id); size_t nvme_auth_hmac_hash_len(u8 hmac_id); u8 nvme_auth_hmac_id(const char *hmac_name); +u32 nvme_auth_key_struct_size(u32 key_len); struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, u8 key_hash); void nvme_auth_free_key(struct nvme_dhchap_key *key); -u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn); +struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash); +struct nvme_dhchap_key *nvme_auth_transform_key( + struct nvme_dhchap_key *key, char *nqn); int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key); int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, u8 *challenge, u8 *aug, size_t hlen); diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h new file mode 100644 index 000000000000..e10333d78dbb --- /dev/null +++ b/include/linux/nvme-keyring.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Hannes Reinecke, SUSE Labs + */ + +#ifndef _NVME_KEYRING_H +#define _NVME_KEYRING_H + +#if IS_ENABLED(CONFIG_NVME_KEYRING) + +key_serial_t nvme_tls_psk_default(struct key *keyring, + const char *hostnqn, const char *subnqn); + +key_serial_t nvme_keyring_id(void); + +#else + +static inline key_serial_t nvme_tls_psk_default(struct key *keyring, + const char *hostnqn, const char *subnqn) +{ + return 0; +} +static inline key_serial_t nvme_keyring_id(void) +{ + return 0; +} +#endif /* !CONFIG_NVME_KEYRING */ +#endif /* _NVME_KEYRING_H */ diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h index 57ebe1267f7f..e07e8978d691 100644 --- a/include/linux/nvme-tcp.h +++ b/include/linux/nvme-tcp.h @@ -18,6 +18,12 @@ enum nvme_tcp_pfv { NVME_TCP_PFV_1_0 = 0x0, }; +enum nvme_tcp_tls_cipher { + NVME_TCP_TLS_CIPHER_INVALID = 0, + NVME_TCP_TLS_CIPHER_SHA256 = 1, + NVME_TCP_TLS_CIPHER_SHA384 = 2, +}; + enum 
nvme_tcp_fatal_error_status { NVME_TCP_FES_INVALID_PDU_HDR = 0x01, NVME_TCP_FES_PDU_SEQ_ERR = 0x02, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 26dd3f859d9d..bc605ec4a3fd 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -20,7 +20,6 @@ #define NVMF_TRSVCID_SIZE 32 #define NVMF_TRADDR_SIZE 256 #define NVMF_TSAS_SIZE 256 -#define NVMF_AUTH_HASH_LEN 64 #define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery" @@ -108,6 +107,13 @@ enum { NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ }; +/* TSAS SECTYPE for TCP transport */ +enum { + NVMF_TCP_SECTYPE_NONE = 0, /* No Security */ + NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */ + NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */ +}; + #define NVME_AQ_DEPTH 32 #define NVME_NR_AEN_COMMANDS 1 #define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS) @@ -810,12 +816,6 @@ struct nvme_reservation_status_ext { struct nvme_registered_ctrl_ext regctl_eds[]; }; -enum nvme_async_event_type { - NVME_AER_TYPE_ERROR = 0, - NVME_AER_TYPE_SMART = 1, - NVME_AER_TYPE_NOTICE = 2, -}; - /* I/O commands */ enum nvme_opcode { @@ -1493,6 +1493,9 @@ struct nvmf_disc_rsp_page_entry { __u16 pkey; __u8 resv10[246]; } rdma; + struct tcp { + __u8 sectype; + } tcp; } tsas; }; @@ -1722,7 +1725,7 @@ struct nvmf_auth_dhchap_success1_data { __u8 rsvd2; __u8 rvalid; __u8 rsvd3[7]; - /* 'hl' bytes of response value if 'rvalid' is set */ + /* 'hl' bytes of response value */ __u8 rval[]; }; @@ -1809,7 +1812,7 @@ struct nvme_command { }; }; -static inline bool nvme_is_fabrics(struct nvme_command *cmd) +static inline bool nvme_is_fabrics(const struct nvme_command *cmd) { return cmd->common.opcode == nvme_fabrics_command; } @@ -1828,7 +1831,7 @@ struct nvme_error_slot { __u8 resv2[24]; }; -static inline bool nvme_is_write(struct nvme_command *cmd) +static inline bool nvme_is_write(const struct nvme_command *cmd) { /* * What a mess... 
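The tcp.sectype field added to the TSAS union above lets a host decide, from a discovery log page entry alone, whether the target port expects a TLS handshake before the NVMe/TCP connection is set up. A minimal sketch of that decision follows; the helper name is invented here, while the NVMF_TCP_SECTYPE_* values and the tsas.tcp.sectype field come from the hunk above:

/*
 * Sketch only: the helper name is invented; the enum values and the
 * tsas.tcp.sectype field are the ones introduced in the hunk above.
 */
static bool nvmf_tcp_port_needs_tls(const struct nvmf_disc_rsp_page_entry *entry)
{
	if (entry->trtype != NVMF_TRTYPE_TCP)
		return false;

	switch (entry->tsas.tcp.sectype) {
	case NVMF_TCP_SECTYPE_TLS12:
	case NVMF_TCP_SECTYPE_TLS13:
		return true;
	default:
		return false;
	}
}

The const-qualified parameter follows the direction of the same hunk, which constifies nvme_is_fabrics() and nvme_is_write().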
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 4523e4e83319..34c0e58dfa26 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -81,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf); const char *nvmem_dev_name(struct nvmem_device *nvmem); +size_t nvmem_dev_size(struct nvmem_device *nvmem); void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries); @@ -127,6 +128,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell, return -EOPNOTSUPP; } +static inline int nvmem_cell_read_u8(struct device *dev, + const char *cell_id, u8 *val) +{ + return -EOPNOTSUPP; +} + static inline int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val) { @@ -241,7 +248,6 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id); struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *name); -struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem); #else static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) @@ -254,12 +260,6 @@ static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np, { return ERR_PTR(-EOPNOTSUPP); } - -static inline struct device_node * -of_nvmem_layout_get_container(struct nvmem_device *nvmem) -{ - return NULL; -} #endif /* CONFIG_NVMEM && CONFIG_OF */ #endif /* ifndef _LINUX_NVMEM_CONSUMER_H */ diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index dae26295e6be..f0ba0e03218f 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -9,6 +9,7 @@ #ifndef _LINUX_NVMEM_PROVIDER_H #define _LINUX_NVMEM_PROVIDER_H +#include <linux/device.h> #include <linux/device/driver.h> #include <linux/err.h> #include <linux/errno.h> @@ -82,13 +83,15 @@ struct nvmem_cell_info { * @owner: Pointer to exporter module. Used for refcounting. * @cells: Optional array of pre-defined NVMEM cells. * @ncells: Number of elements in cells. + * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax. + * @fixup_dt_cell_info: Will be called before a cell is added. Can be + * used to modify the nvmem_cell_info. * @keepout: Optional array of keepout ranges (sorted ascending by start). * @nkeepout: Number of elements in the keepout array. * @type: Type of the nvmem storage * @read_only: Device is read-only. * @root_only: Device is accessibly to root only. * @of_node: If given, this will be used instead of the parent's of_node. - * @no_of_node: Device should not use the parent's of_node even if it's !NULL. * @reg_read: Callback to read data. * @reg_write: Callback to write data. * @size: Device size. @@ -112,6 +115,9 @@ struct nvmem_config { struct module *owner; const struct nvmem_cell_info *cells; int ncells; + bool add_legacy_fixed_of_cells; + void (*fixup_dt_cell_info)(struct nvmem_device *nvmem, + struct nvmem_cell_info *cell); const struct nvmem_keepout *keepout; unsigned int nkeepout; enum nvmem_type type; @@ -120,7 +126,6 @@ struct nvmem_config { bool ignore_wp; struct nvmem_layout *layout; struct device_node *of_node; - bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; int size; @@ -154,15 +159,11 @@ struct nvmem_cell_table { /** * struct nvmem_layout - NVMEM layout definitions * - * @name: Layout name. - * @of_match_table: Open firmware match table. + * @dev: Device-model layout device. 
+ * @nvmem: The underlying NVMEM device * @add_cells: Will be called if a nvmem device is found which * has this layout. The function will add layout * specific cells with nvmem_add_one_cell(). - * @fixup_cell_info: Will be called before a cell is added. Can be - * used to modify the nvmem_cell_info. - * @owner: Pointer to struct module. - * @node: List node. * * A nvmem device can hold a well defined structure which can just be * evaluated during runtime. For example a TLV list, or a list of "name=val" @@ -170,17 +171,15 @@ struct nvmem_cell_table { * cells. */ struct nvmem_layout { - const char *name; - const struct of_device_id *of_match_table; - int (*add_cells)(struct device *dev, struct nvmem_device *nvmem, - struct nvmem_layout *layout); - void (*fixup_cell_info)(struct nvmem_device *nvmem, - struct nvmem_layout *layout, - struct nvmem_cell_info *cell); - - /* private */ - struct module *owner; - struct list_head node; + struct device dev; + struct nvmem_device *nvmem; + int (*add_cells)(struct nvmem_layout *layout); +}; + +struct nvmem_layout_driver { + struct device_driver driver; + int (*probe)(struct nvmem_layout *layout); + void (*remove)(struct nvmem_layout *layout); }; #if IS_ENABLED(CONFIG_NVMEM) @@ -197,13 +196,14 @@ void nvmem_del_cell_table(struct nvmem_cell_table *table); int nvmem_add_one_cell(struct nvmem_device *nvmem, const struct nvmem_cell_info *info); -int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner); -#define nvmem_layout_register(layout) \ - __nvmem_layout_register(layout, THIS_MODULE) +int nvmem_layout_register(struct nvmem_layout *layout); void nvmem_layout_unregister(struct nvmem_layout *layout); -const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem, - struct nvmem_layout *layout); +int nvmem_layout_driver_register(struct nvmem_layout_driver *drv); +void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv); +#define module_nvmem_layout_driver(__nvmem_layout_driver) \ + module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \ + nvmem_layout_driver_unregister) #else @@ -235,17 +235,27 @@ static inline int nvmem_layout_register(struct nvmem_layout *layout) static inline void nvmem_layout_unregister(struct nvmem_layout *layout) {} -static inline const void * -nvmem_layout_get_match_data(struct nvmem_device *nvmem, - struct nvmem_layout *layout) +#endif /* CONFIG_NVMEM */ + +#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) + +/** + * of_nvmem_layout_get_container() - Get OF node of layout container + * + * @nvmem: nvmem device + * + * Return: a node pointer with refcount incremented or NULL if no + * container exists. Use of_node_put() on it when done. 
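With struct nvmem_layout now embedding a struct device and pairing with struct nvmem_layout_driver, a layout driver reduces to a probe that installs add_cells() and registers the layout. A hedged sketch against the declarations above; every demo_* name is invented:

/* Hedged sketch of a layout driver for the reworked API above. */
static int demo_add_cells(struct nvmem_layout *layout)
{
	/* would parse layout->nvmem content and call nvmem_add_one_cell() */
	return 0;
}

static int demo_layout_probe(struct nvmem_layout *layout)
{
	layout->add_cells = demo_add_cells;
	return nvmem_layout_register(layout);
}

static void demo_layout_remove(struct nvmem_layout *layout)
{
	nvmem_layout_unregister(layout);
}

static struct nvmem_layout_driver demo_layout_driver = {
	.driver	= { .name = "demo-layout" },
	.probe	= demo_layout_probe,
	.remove	= demo_layout_remove,
};
module_nvmem_layout_driver(demo_layout_driver);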
+ */ +struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem); + +#else /* CONFIG_NVMEM && CONFIG_OF */ + +static inline struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem) { return NULL; } -#endif /* CONFIG_NVMEM */ - -#define module_nvmem_layout_driver(__layout_driver) \ - module_driver(__layout_driver, nvmem_layout_register, \ - nvmem_layout_unregister) +#endif /* CONFIG_NVMEM && CONFIG_OF */ #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/objpool.h b/include/linux/objpool.h new file mode 100644 index 000000000000..15aff4a17f0c --- /dev/null +++ b/include/linux/objpool.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_OBJPOOL_H +#define _LINUX_OBJPOOL_H + +#include <linux/types.h> +#include <linux/refcount.h> + +/* + * objpool: ring-array based lockless MPMC queue + * + * Copyright: wuqiang.matt@bytedance.com,mhiramat@kernel.org + * + * objpool is a scalable implementation of high performance queue for + * object allocation and reclamation, such as kretprobe instances. + * + * With leveraging percpu ring-array to mitigate hot spots of memory + * contention, it delivers near-linear scalability for high parallel + * scenarios. The objpool is best suited for the following cases: + * 1) Memory allocation or reclamation are prohibited or too expensive + * 2) Consumers are of different priorities, such as irqs and threads + * + * Limitations: + * 1) Maximum objects (capacity) is fixed after objpool creation + * 2) All pre-allocated objects are managed in percpu ring array, + * which consumes more memory than linked lists + */ + +/** + * struct objpool_slot - percpu ring array of objpool + * @head: head sequence of the local ring array (to retrieve at) + * @tail: tail sequence of the local ring array (to append at) + * @last: the last sequence number marked as ready for retrieve + * @mask: bits mask for modulo capacity to compute array indexes + * @entries: object entries on this slot + * + * Represents a cpu-local array-based ring buffer, its size is specialized + * during initialization of object pool. The percpu objpool node is to be + * allocated from local memory for NUMA system, and to be kept compact in + * continuous memory: CPU assigned number of objects are stored just after + * the body of objpool_node. + * + * Real size of the ring array is far too smaller than the value range of + * head and tail, typed as uint32_t: [0, 2^32), so only lower bits (mask) + * of head and tail are used as the actual position in the ring array. In + * general the ring array is acting like a small sliding window, which is + * always moving forward in the loop of [0, 2^32). 
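The sliding-window arithmetic described above is easier to see with concrete numbers. A standalone illustration, not taken from the kernel sources, of how the mask turns the free-running 32-bit sequence counters into array slots:

/*
 * Standalone illustration (not kernel code): with capacity 64 the mask
 * is 63, and a slot index is always the low bits of the sequence number.
 */
#include <stdint.h>

static inline uint32_t ring_slot(uint32_t seq, uint32_t mask)
{
	return seq & mask;
}

/*
 * ring_slot(0xffffffff, 63) == 63, and after the counter wraps,
 * ring_slot(0, 63) == 0: the window slides forward through [0, 2^32)
 * with no special-case handling at the wrap point.
 */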
+ */ +struct objpool_slot { + uint32_t head; + uint32_t tail; + uint32_t last; + uint32_t mask; + void *entries[]; +} __packed; + +struct objpool_head; + +/* + * caller-specified callback for object initial setup, it's only called + * once for each object (just after the memory allocation of the object) + */ +typedef int (*objpool_init_obj_cb)(void *obj, void *context); + +/* caller-specified cleanup callback for objpool destruction */ +typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context); + +/** + * struct objpool_head - object pooling metadata + * @obj_size: object size, aligned to sizeof(void *) + * @nr_objs: total objs (to be pre-allocated with objpool) + * @nr_cpus: local copy of nr_cpu_ids + * @capacity: max objs can be managed by one objpool_slot + * @gfp: gfp flags for kmalloc & vmalloc + * @ref: refcount of objpool + * @flags: flags for objpool management + * @cpu_slots: pointer to the array of objpool_slot + * @release: resource cleanup callback + * @context: caller-provided context + */ +struct objpool_head { + int obj_size; + int nr_objs; + int nr_cpus; + int capacity; + gfp_t gfp; + refcount_t ref; + unsigned long flags; + struct objpool_slot **cpu_slots; + objpool_fini_cb release; + void *context; +}; + +#define OBJPOOL_NR_OBJECT_MAX (1UL << 24) /* maximum numbers of total objects */ +#define OBJPOOL_OBJECT_SIZE_MAX (1UL << 16) /* maximum size of an object */ + +/** + * objpool_init() - initialize objpool and pre-allocated objects + * @pool: the object pool to be initialized, declared by caller + * @nr_objs: total objects to be pre-allocated by this object pool + * @object_size: size of an object (should be > 0) + * @gfp: flags for memory allocation (via kmalloc or vmalloc) + * @context: user context for object initialization callback + * @objinit: object initialization callback for extra setup + * @release: cleanup callback for extra cleanup task + * + * return value: 0 for success, otherwise error code + * + * All pre-allocated objects are to be zeroed after memory allocation. + * Caller could do extra initialization in objinit callback. objinit() + * will be called just after slot allocation and called only once for + * each object. After that the objpool won't touch any content of the + * objects. It's caller's duty to perform reinitialization after each + * pop (object allocation) or do clearance before each push (object + * reclamation). + */ +int objpool_init(struct objpool_head *pool, int nr_objs, int object_size, + gfp_t gfp, void *context, objpool_init_obj_cb objinit, + objpool_fini_cb release); + +/** + * objpool_pop() - allocate an object from objpool + * @pool: object pool + * + * return value: object ptr or NULL if failed + */ +void *objpool_pop(struct objpool_head *pool); + +/** + * objpool_push() - reclaim the object and return back to objpool + * @obj: object ptr to be pushed to objpool + * @pool: object pool + * + * return: 0 or error code (it fails only when user tries to push + * the same object multiple times or wrong "objects" into objpool) + */ +int objpool_push(void *obj, struct objpool_head *pool); + +/** + * objpool_drop() - discard the object and deref objpool + * @obj: object ptr to be discarded + * @pool: object pool + * + * return: 0 if objpool was released; -EAGAIN if there are still + * outstanding objects + * + * objpool_drop is normally for the release of outstanding objects + * after objpool cleanup (objpool_fini). 
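Putting the calls documented above together, the expected lifecycle is objpool_init() → objpool_pop() → objpool_push() → objpool_fini(). A hedged usage sketch; struct demo_obj and its callback are invented, while the API calls are the ones declared above:

/* Hedged usage sketch; demo_* names are invented for illustration. */
struct demo_obj {
	int id;
};

static int demo_obj_init(void *obj, void *context)
{
	((struct demo_obj *)obj)->id = -1;	/* one-time extra setup */
	return 0;
}

static void demo_objpool_lifecycle(void)
{
	struct objpool_head pool;
	struct demo_obj *obj;

	if (objpool_init(&pool, 128, sizeof(struct demo_obj),
			 GFP_KERNEL, NULL, demo_obj_init, NULL))
		return;

	obj = objpool_pop(&pool);		/* NULL once the pool is empty */
	if (obj)
		objpool_push(obj, &pool);	/* reinit/clearing is the caller's job */

	objpool_fini(&pool);	/* pool is freed once all objects are back */
}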
Thinking of this example: + * kretprobe is unregistered and objpool_fini() is called to release + * all remained objects, but there are still objects being used by + * unfinished kretprobes (like blockable function: sys_accept). So + * only when the last outstanding object is dropped could the whole + * objpool be released along with the call of objpool_drop() + */ +int objpool_drop(void *obj, struct objpool_head *pool); + +/** + * objpool_free() - release objpool forcely (all objects to be freed) + * @pool: object pool to be released + */ +void objpool_free(struct objpool_head *pool); + +/** + * objpool_fini() - deref object pool (also releasing unused objects) + * @pool: object pool to be dereferenced + * + * objpool_fini() will try to release all remained free objects and + * then drop an extra reference of the objpool. If all objects are + * already returned to objpool (so called synchronous use cases), + * the objpool itself will be freed together. But if there are still + * outstanding objects (so called asynchronous use cases, such like + * blockable kretprobe), the objpool won't be released until all + * the outstanding objects are dropped, but the caller must assure + * there are no concurrent objpool_push() on the fly. Normally RCU + * is being required to make sure all ongoing objpool_push() must + * be finished before calling objpool_fini(), so does test_objpool, + * kretprobe or rethook + */ +void objpool_fini(struct objpool_head *pool); + +#endif /* _LINUX_OBJPOOL_H */ diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 03f82c2c2ebf..33212e93f4a6 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -48,13 +48,13 @@ #define ANNOTATE_NOENDBR \ "986: \n\t" \ ".pushsection .discard.noendbr\n\t" \ - ".long 986b - .\n\t" \ + ".long 986b\n\t" \ ".popsection\n\t" #define ASM_REACHABLE \ "998:\n\t" \ ".pushsection .discard.reachable\n\t" \ - ".long 998b - .\n\t" \ + ".long 998b\n\t" \ ".popsection\n\t" #else /* __ASSEMBLY__ */ @@ -66,7 +66,7 @@ #define ANNOTATE_INTRA_FUNCTION_CALL \ 999: \ .pushsection .discard.intra_function_calls; \ - .long 999b - .; \ + .long 999b; \ .popsection; /* @@ -118,7 +118,7 @@ .macro ANNOTATE_NOENDBR .Lhere_\@: .pushsection .discard.noendbr - .long .Lhere_\@ - . + .long .Lhere_\@ .popsection .endm @@ -130,7 +130,8 @@ * it will be ignored. */ .macro VALIDATE_UNRET_BEGIN -#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY) +#if defined(CONFIG_NOINSTR_VALIDATION) && \ + (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) .Lhere_\@: .pushsection .discard.validate_unret .long .Lhere_\@ - . @@ -141,7 +142,7 @@ .macro REACHABLE .Lhere_\@: .pushsection .discard.reachable - .long .Lhere_\@ - . 
+ .long .Lhere_\@ .popsection .endm diff --git a/include/linux/of.h b/include/linux/of.h index ed679819c279..6a9ddf20e79a 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1676,8 +1676,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb); #else -static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size, - int *ovcs_id) +static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, + int *ovcs_id, struct device_node *target_base) { return -ENOTSUPP; } diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 2c7a3d4bc775..9042bca5bb84 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -2,10 +2,7 @@ #ifndef _LINUX_OF_DEVICE_H #define _LINUX_OF_DEVICE_H -#include <linux/platform_device.h> -#include <linux/of_platform.h> /* temporary until merge */ - -#include <linux/of.h> +#include <linux/device/driver.h> struct device; struct of_device_id; @@ -40,6 +37,9 @@ static inline int of_dma_configure(struct device *dev, { return of_dma_configure_id(dev, np, force_dma, NULL); } + +void of_device_make_bus_id(struct device *dev); + #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -82,6 +82,9 @@ static inline int of_dma_configure(struct device *dev, { return 0; } + +static inline void of_device_make_bus_id(struct device *dev) {} + #endif /* CONFIG_OF */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index 9a5e6b410dd2..e61cbbe12dac 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -8,20 +8,19 @@ struct iommu_ops; #ifdef CONFIG_OF_IOMMU -extern const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np, - const u32 *id); +extern int of_iommu_configure(struct device *dev, struct device_node *master_np, + const u32 *id); extern void of_iommu_get_resv_regions(struct device *dev, struct list_head *list); #else -static inline const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np, - const u32 *id) +static inline int of_iommu_configure(struct device *dev, + struct device_node *master_np, + const u32 *id) { - return NULL; + return -ENODEV; } static inline void of_iommu_get_resv_regions(struct device *dev, diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index fadfea575485..a2ff1ad48f7f 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -7,11 +7,11 @@ */ #include <linux/mod_devicetable.h> -#include <linux/of_device.h> -#include <linux/platform_device.h> struct device; +struct device_node; struct of_device_id; +struct platform_device; /** * struct of_dev_auxdata - lookup table entry for device names & platform_data diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 0f4a8903922a..3921fbed0b28 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -17,12 +17,10 @@ * build_OID_registry.pl to generate the data for look_up_OID(). 
*/ enum OID { - OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */ OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */ - OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */ OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */ OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */ @@ -30,10 +28,6 @@ enum OID { /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ - OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */ - OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */ - OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */ - OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */ OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */ OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */ OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */ @@ -49,11 +43,6 @@ enum OID { OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */ - /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */ - OID_md2, /* 1.2.840.113549.2.2 */ - OID_md4, /* 1.2.840.113549.2.4 */ - OID_md5, /* 1.2.840.113549.2.5 */ - OID_mskrb5, /* 1.2.840.48018.1.2.2 */ OID_krb5, /* 1.2.840.113554.1.2.2 */ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */ @@ -67,6 +56,7 @@ enum OID { OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */ + OID_negoex, /* 1.3.6.1.4.1.311.2.2.30 */ OID_spnego, /* 1.3.6.1.5.5.2 */ @@ -74,7 +64,6 @@ enum OID { OID_PKU2U, /* 1.3.5.1.5.2.7 */ OID_Scram, /* 1.3.6.1.5.5.14 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ - OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ @@ -140,6 +129,17 @@ enum OID { OID_TPMImportableKey, /* 2.23.133.10.1.4 */ OID_TPMSealedData, /* 2.23.133.10.1.5 */ + /* CSOR FIPS-202 SHA-3 */ + OID_sha3_256, /* 2.16.840.1.101.3.4.2.8 */ + OID_sha3_384, /* 2.16.840.1.101.3.4.2.9 */ + OID_sha3_512, /* 2.16.840.1.101.3.4.2.10 */ + OID_id_ecdsa_with_sha3_256, /* 2.16.840.1.101.3.4.3.10 */ + OID_id_ecdsa_with_sha3_384, /* 2.16.840.1.101.3.4.3.11 */ + OID_id_ecdsa_with_sha3_512, /* 2.16.840.1.101.3.4.3.12 */ + OID_id_rsassa_pkcs1_v1_5_with_sha3_256, /* 2.16.840.1.101.3.4.3.14 */ + OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */ + OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */ + OID__NR }; diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h index 5581dbd3bd34..ea8fb31379e3 100644 --- a/include/linux/osq_lock.h +++ b/include/linux/osq_lock.h @@ -6,11 +6,6 @@ * An MCS like lock especially tailored for optimistic spinning for sleeping * lock implementations (mutex, rwsem, etc). */ -struct optimistic_spin_node { - struct optimistic_spin_node *next, *prev; - int locked; /* 1 if lock acquired */ - int cpu; /* encoded CPU # + 1 value */ -}; struct optimistic_spin_queue { /* diff --git a/include/linux/overflow.h b/include/linux/overflow.h index f9b60313eaea..7b5cf4a5cd19 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -309,4 +309,39 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) #define struct_size_t(type, member, count) \ struct_size((type *)NULL, member, count) +/** + * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family. 
+ * Enables caller macro to pass (different) initializer. + * + * @type: structure type name, including "struct" keyword. + * @name: Name for a variable to define. + * @member: Name of the array member. + * @count: Number of elements in the array; must be compile-time const. + * @initializer: initializer expression (could be empty for no init). + */ +#define _DEFINE_FLEX(type, name, member, count, initializer) \ + _Static_assert(__builtin_constant_p(count), \ + "onstack flex array members require compile-time const count"); \ + union { \ + u8 bytes[struct_size_t(type, member, count)]; \ + type obj; \ + } name##_u initializer; \ + type *name = (type *)&name##_u + +/** + * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing + * flexible array member. + * + * @type: structure type name, including "struct" keyword. + * @name: Name for a variable to define. + * @member: Name of the array member. + * @count: Number of elements in the array; must be compile-time const. + * + * Define a zeroed, on-stack, instance of @type structure with a trailing + * flexible array member. + * Use __struct_size(@name) to get compile-time size of it afterwards. + */ +#define DEFINE_FLEX(type, name, member, count) \ + _DEFINE_FLEX(type, name, member, count, = {}) + #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5c02720c53a5..735cddc13d20 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -693,6 +693,25 @@ TESTPAGEFLAG_FALSE(Ksm, ksm) u64 stable_page_flags(struct page *page); /** + * folio_xor_flags_has_waiters - Change some folio flags. + * @folio: The folio. + * @mask: Bits set in this word will be changed. + * + * This must only be used for flags which are changed with the folio + * lock held. For example, it is unsafe to use for PG_dirty as that + * can be set without the folio lock held. It can also only be used + * on flags which are in the range 0-6 as some of the implementations + * only affect those bits. + * + * Return: Whether there are tasks waiting on the folio. + */ +static inline bool folio_xor_flags_has_waiters(struct folio *folio, + unsigned long mask) +{ + return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0)); +} + +/** * folio_test_uptodate - Is this folio up to date? * @folio: The folio. * @@ -753,19 +772,14 @@ static __always_inline void SetPageUptodate(struct page *page) CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL) -bool __folio_start_writeback(struct folio *folio, bool keep_write); -bool set_page_writeback(struct page *page); +void __folio_start_writeback(struct folio *folio, bool keep_write); +void set_page_writeback(struct page *page); #define folio_start_writeback(folio) \ __folio_start_writeback(folio, false) #define folio_start_writeback_keepwrite(folio) \ __folio_start_writeback(folio, true) -static inline bool test_set_page_writeback(struct page *page) -{ - return set_page_writeback(page); -} - static __always_inline bool folio_test_head(struct folio *folio) { return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY)); diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index e83c4c095041..3f2409b968ec 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -41,14 +41,14 @@ extern unsigned int pageblock_order; * Huge pages are a constant size, but don't exceed the maximum allocation * granularity. 
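DEFINE_FLEX() above avoids the usual heap allocation for a short-lived structure with a trailing flexible array member. A hedged sketch; struct demo_msg is invented, and the element count must be a compile-time constant or the _Static_assert in _DEFINE_FLEX() fires:

/* Hedged sketch for DEFINE_FLEX(); struct demo_msg is invented. */
struct demo_msg {
	u16 len;
	u8 payload[];		/* trailing flexible array member */
};

static void demo_flex(void)
{
	DEFINE_FLEX(struct demo_msg, msg, payload, 16);

	msg->len = 16;		/* backing storage starts out zeroed */
	msg->payload[0] = 0xaa;
	/* per the kernel-doc above, __struct_size(msg) gives the full size */
}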
*/ -#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER) +#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER) #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ #else /* CONFIG_HUGETLB_PAGE */ /* If huge pages are not used, group by MAX_ORDER_NR_PAGES */ -#define pageblock_order MAX_ORDER +#define pageblock_order MAX_PAGE_ORDER #endif /* CONFIG_HUGETLB_PAGE */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 351c3b7f93a1..2df35e65557d 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -204,6 +204,9 @@ enum mapping_flags { AS_NO_WRITEBACK_TAGS = 5, AS_LARGE_FOLIO_SUPPORT = 6, AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */ + AS_STABLE_WRITES, /* must wait for writeback before modifying + folio contents */ + AS_UNMOVABLE, /* The mapping cannot be moved, ever */ }; /** @@ -289,6 +292,37 @@ static inline void mapping_clear_release_always(struct address_space *mapping) clear_bit(AS_RELEASE_ALWAYS, &mapping->flags); } +static inline bool mapping_stable_writes(const struct address_space *mapping) +{ + return test_bit(AS_STABLE_WRITES, &mapping->flags); +} + +static inline void mapping_set_stable_writes(struct address_space *mapping) +{ + set_bit(AS_STABLE_WRITES, &mapping->flags); +} + +static inline void mapping_clear_stable_writes(struct address_space *mapping) +{ + clear_bit(AS_STABLE_WRITES, &mapping->flags); +} + +static inline void mapping_set_unmovable(struct address_space *mapping) +{ + /* + * It's expected unmovable mappings are also unevictable. Compaction + * migrate scanner (isolate_migratepages_block()) relies on this to + * reduce page locking. + */ + set_bit(AS_UNEVICTABLE, &mapping->flags); + set_bit(AS_UNMOVABLE, &mapping->flags); +} + +static inline bool mapping_unmovable(struct address_space *mapping) +{ + return test_bit(AS_UNMOVABLE, &mapping->flags); +} + static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return mapping->gfp_mask; @@ -789,9 +823,6 @@ static inline pgoff_t folio_next_index(struct folio *folio) */ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) { - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (folio_test_hugetlb(folio)) - return &folio->page; return folio_page(folio, index & (folio_nr_pages(folio) - 1)); } @@ -807,9 +838,6 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) */ static inline bool folio_contains(struct folio *folio, pgoff_t index) { - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (folio_test_hugetlb(folio)) - return folio->index == index; return index - folio_index(folio) < folio_nr_pages(folio); } @@ -867,10 +895,9 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping, } /* - * Get index of the page within radix-tree (but not for hugetlb pages). - * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) + * Get the offset in PAGE_SIZE (even for hugetlb pages). */ -static inline pgoff_t page_to_index(struct page *page) +static inline pgoff_t page_to_pgoff(struct page *page) { struct page *head; @@ -885,19 +912,6 @@ static inline pgoff_t page_to_index(struct page *page) return head->index + page - head; } -extern pgoff_t hugetlb_basepage_index(struct page *page); - -/* - * Get the offset in PAGE_SIZE (even for hugetlb pages). 
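AS_STABLE_WRITES above moves the stable-pages requirement onto the mapping itself, with set/clear/test helpers. A hedged sketch of the intended pattern; the function name is invented:

/*
 * Hedged sketch; the function name is invented. A device that reads
 * page contents while they are under writeback (e.g. to compute
 * integrity checksums) would set the flag, making user mappings wait
 * for writeback before modifying folio contents.
 */
static void demo_set_stable_writes(struct address_space *mapping, bool integrity)
{
	if (integrity)
		mapping_set_stable_writes(mapping);
	else
		mapping_clear_stable_writes(mapping);
}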
- * (TODO: hugetlb pages should have ->index in PAGE_SIZE) - */ -static inline pgoff_t page_to_pgoff(struct page *page) -{ - if (unlikely(PageHuge(page))) - return hugetlb_basepage_index(page); - return page_to_index(page); -} - /* * Return byte-offset into filesystem object for page. */ @@ -934,24 +948,16 @@ static inline loff_t folio_file_pos(struct folio *folio) /* * Get the offset in PAGE_SIZE (even for hugetlb folios). - * (TODO: hugetlb folios should have ->index in PAGE_SIZE) */ static inline pgoff_t folio_pgoff(struct folio *folio) { - if (unlikely(folio_test_hugetlb(folio))) - return hugetlb_basepage_index(&folio->page); return folio->index; } -extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, - unsigned long address); - static inline pgoff_t linear_page_index(struct vm_area_struct *vma, unsigned long address) { pgoff_t pgoff; - if (unlikely(is_vm_hugetlb_page(vma))) - return linear_hugepage_index(vma, address); pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; return pgoff; @@ -1129,6 +1135,7 @@ static inline void wait_on_page_locked(struct page *page) folio_wait_locked(page_folio(page)); } +void folio_end_read(struct folio *folio, bool success); void wait_on_page_writeback(struct page *page); void folio_wait_writeback(struct folio *folio); int folio_wait_writeback_killable(struct folio *folio); diff --git a/include/linux/parport.h b/include/linux/parport.h index 999eddd619b7..fff39bc30629 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -180,8 +180,6 @@ struct ieee1284_info { struct semaphore irq; }; -#define PARPORT_NAME_MAX_LEN 15 - /* A parallel port */ struct parport { unsigned long base; /* base address */ diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 6b1301e2498e..3a4860bd2758 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -93,6 +93,6 @@ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */ #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) /* for DT-based PCI controllers that support ECAM */ int pci_host_common_probe(struct platform_device *pdev); -int pci_host_common_remove(struct platform_device *pdev); +void pci_host_common_remove(struct platform_device *pdev); #endif #endif diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 5cb694031072..40ea18f5aa02 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -19,13 +19,6 @@ enum pci_epc_interface_type { SECONDARY_INTERFACE, }; -enum pci_epc_irq_type { - PCI_EPC_IRQ_UNKNOWN, - PCI_EPC_IRQ_LEGACY, - PCI_EPC_IRQ_MSI, - PCI_EPC_IRQ_MSIX, -}; - static inline const char * pci_epc_interface_string(enum pci_epc_interface_type type) { @@ -79,7 +72,7 @@ struct pci_epc_ops { u16 interrupts, enum pci_barno, u32 offset); int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - enum pci_epc_irq_type type, u16 interrupt_num); + unsigned int type, u16 interrupt_num); int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size, u32 *msi_data, @@ -122,7 +115,7 @@ struct pci_epc_mem { * struct pci_epc - represents the PCI EPC device * @dev: PCI EPC device * @pci_epf: list of endpoint functions present in this EPC device - * list_lock: Mutex for protecting pci_epf list + * @list_lock: Mutex for protecting pci_epf list * @ops: function pointers for performing endpoint operations * @windows: array of address space of the endpoint controller * @mem: first window of 
the endpoint controller, which corresponds to @@ -229,7 +222,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size, u32 *msi_data, u32 *msi_addr_offset); int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - enum pci_epc_irq_type type, u16 interrupt_num); + unsigned int type, u16 interrupt_num); int pci_epc_start(struct pci_epc *epc); void pci_epc_stop(struct pci_epc *epc); const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 3f44b6aec477..77b146e0f672 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -68,7 +68,7 @@ struct pci_epf_ops { }; /** - * struct pci_epf_event_ops - Callbacks for capturing the EPC events + * struct pci_epc_event_ops - Callbacks for capturing the EPC events * @core_init: Callback for the EPC initialization complete event * @link_up: Callback for the EPC link up event * @link_down: Callback for the EPC link down event @@ -98,7 +98,7 @@ struct pci_epf_driver { void (*remove)(struct pci_epf *epf); struct device_driver driver; - struct pci_epf_ops *ops; + const struct pci_epf_ops *ops; struct module *owner; struct list_head epf_group; const struct pci_epf_device_id *id_table; diff --git a/include/linux/pci.h b/include/linux/pci.h index 8c7c2c3c6c65..7ab0d13672da 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -713,6 +713,31 @@ static inline bool pci_is_bridge(struct pci_dev *dev) dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; } +/** + * pci_is_vga - check if the PCI device is a VGA device + * @pdev: PCI device + * + * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define + * VGA Base Class and Sub-Classes: + * + * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible + * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code) + * + * Return true if the PCI device is a VGA device and uses the legacy VGA + * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and + * aliases). + */ +static inline bool pci_is_vga(struct pci_dev *pdev) +{ + if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) + return true; + + if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA) + return true; + + return false; +} + #define for_each_pci_bridge(dev, bus) \ list_for_each_entry(dev, &bus->devices, bus_list) \ if (!pci_is_bridge(dev)) {} else @@ -861,7 +886,6 @@ struct module; /** * struct pci_driver - PCI driver structure - * @node: List of driver structures. * @name: Driver name. * @id_table: Pointer to table of device IDs the driver is * interested in. Most drivers should export this @@ -916,7 +940,6 @@ struct module; * own I/O address space. */ struct pci_driver { - struct list_head node; const char *name; const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */ int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ @@ -1049,11 +1072,13 @@ enum { PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ }; -#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ +#define PCI_IRQ_INTX (1 << 0) /* Allow INTx interrupts */ #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ +#define PCI_IRQ_LEGACY PCI_IRQ_INTX /* Deprecated! 
Use PCI_IRQ_INTX */ + /* These external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI @@ -1146,6 +1171,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); struct pci_dev *pci_dev_get(struct pci_dev *dev); void pci_dev_put(struct pci_dev *dev); +DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T)) void pci_remove_bus(struct pci_bus *b); void pci_stop_and_remove_bus_device(struct pci_dev *dev); void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev); @@ -1181,6 +1207,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn); struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); +struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from); + int pci_dev_present(const struct pci_device_id *ids); int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, @@ -1213,6 +1241,8 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set); int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); @@ -1338,6 +1368,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps); u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, enum pci_bus_speed *speed, enum pcie_link_width *width); +int pcie_link_speed_mbps(struct pci_dev *pdev); void pcie_print_link_status(struct pci_dev *dev); int pcie_reset_flr(struct pci_dev *dev, bool probe); int pcie_flr(struct pci_dev *dev); @@ -1391,6 +1422,7 @@ int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); +int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); void pci_pme_active(struct pci_dev *dev, bool enable); @@ -1594,6 +1626,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata); +void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata); int pci_cfg_space_size(struct pci_dev *dev); unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); @@ -1624,6 +1658,8 @@ struct msix_entry { u16 entry; /* Driver uses to specify entry, OS writes */ }; +struct msi_domain_template; + #ifdef CONFIG_PCI_MSI int pci_msi_vec_count(struct pci_dev *dev); void pci_disable_msi(struct pci_dev *dev); @@ -1656,6 +1692,11 @@ void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map); void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); +bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template 
*template, + unsigned int hwsize, void *data); +struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie, + const struct irq_affinity_desc *affdesc); +void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1719,6 +1760,25 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, { return cpu_possible_mask; } + +static inline bool pci_create_ims_domain(struct pci_dev *pdev, + const struct msi_domain_template *template, + unsigned int hwsize, void *data) +{ return false; } + +static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, + union msi_instance_cookie *icookie, + const struct irq_affinity_desc *affdesc) +{ + struct msi_map map = { .index = -ENOSYS, }; + + return map; +} + +static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map) +{ +} + #endif /** @@ -1777,6 +1837,7 @@ extern bool pcie_ports_native; int pci_disable_link_state(struct pci_dev *pdev, int state); int pci_disable_link_state_locked(struct pci_dev *pdev, int state); int pci_enable_link_state(struct pci_dev *pdev, int state); +int pci_enable_link_state_locked(struct pci_dev *pdev, int state); void pcie_no_aspm(void); bool pcie_aspm_support_enabled(void); bool pcie_aspm_enabled(struct pci_dev *pdev); @@ -1787,6 +1848,8 @@ static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state) { return 0; } static inline int pci_enable_link_state(struct pci_dev *pdev, int state) { return 0; } +static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state) +{ return 0; } static inline void pcie_no_aspm(void) { } static inline bool pcie_aspm_support_enabled(void) { return false; } static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } @@ -1819,6 +1882,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev); void pci_dev_lock(struct pci_dev *dev); int pci_dev_trylock(struct pci_dev *dev); void pci_dev_unlock(struct pci_dev *dev); +DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T)) /* * PCI domain support. Sometimes called PCI segment (eg by ACPI), @@ -1924,6 +1988,9 @@ static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) { return NULL; } +static inline struct pci_dev *pci_get_base_class(unsigned int class, + struct pci_dev *from) +{ return NULL; } static inline int pci_dev_present(const struct pci_device_id *ids) { return 0; } @@ -1961,6 +2028,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; } static inline void pci_restore_state(struct pci_dev *dev) { } static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; } +static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state) +{ return 0; } static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) { return 0; } static inline pci_power_t pci_choose_state(struct pci_dev *dev, @@ -2072,14 +2141,14 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); (pci_resource_end((dev), (bar)) ? \ resource_size(pci_resource_n((dev), (bar))) : 0) -#define __pci_dev_for_each_res0(dev, res, ...) \ - for (unsigned int __b = 0; \ - res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \ +#define __pci_dev_for_each_res0(dev, res, ...) 
\ + for (unsigned int __b = 0; \ + __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \ __b++) -#define __pci_dev_for_each_res1(dev, res, __b) \ - for (__b = 0; \ - res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \ +#define __pci_dev_for_each_res1(dev, res, __b) \ + for (__b = 0; \ + __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \ __b++) #define pci_dev_for_each_resource(dev, res, ...) \ @@ -2616,14 +2685,6 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); #endif -struct msi_domain_template; - -bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template, - unsigned int hwsize, void *data); -struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie, - const struct irq_affinity_desc *affdesc); -void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map); - #include <linux/dma-mapping.h> #define pci_printk(level, pdev, fmt, arg...) \ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 5fb3d4c393a9..a0c75e467df3 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -180,6 +180,8 @@ #define PCI_DEVICE_ID_BERKOM_A4T 0xffa4 #define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8 +#define PCI_VENDOR_ID_ITTIM 0x0b48 + #define PCI_VENDOR_ID_COMPAQ 0x0e11 #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508 #define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc @@ -579,6 +581,8 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3 #define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb #define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3 +#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b +#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -2601,6 +2605,8 @@ #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 +#define PCI_VENDOR_ID_ALIBABA 0x1ded + #define PCI_VENDOR_ID_TEHUTI 0x1fc9 #define PCI_DEVICE_ID_TEHUTI_3009 0x3009 #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 @@ -3061,6 +3067,7 @@ #define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 #define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2 #define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 +#define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728 #define PCI_DEVICE_ID_INTEL_HDA_RPL_S 0x7a50 #define PCI_DEVICE_ID_INTEL_HDA_ADL_S 0x7ad0 #define PCI_DEVICE_ID_INTEL_HDA_MTL 0x7e28 diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h index e838a2b90440..17a87c1a55d7 100644 --- a/include/linux/pds/pds_core_if.h +++ b/include/linux/pds/pds_core_if.h @@ -79,6 +79,7 @@ enum pds_core_status_code { PDS_RC_EVFID = 31, /* VF ID does not exist */ PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */ PDS_RC_ECLIENT = 33, /* No such client id */ + PDS_RC_BAD_PCI = 255, /* Broken PCI when reading status */ }; /** diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 68fac2e7cbe6..8c677f185901 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -132,6 +132,7 @@ extern void __init setup_per_cpu_areas(void); extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1); extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1); extern void free_percpu(void __percpu *__pdata); +extern size_t pcpu_alloc_size(void __percpu *__pdata); DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T)) diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 
d01351b1526f..3a44dd1e33d2 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -57,6 +57,8 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); +bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, + s64 amount, s32 batch); void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) @@ -69,6 +71,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) percpu_counter_add_batch(fbc, amount, percpu_counter_batch); } +static inline bool +percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) +{ + return __percpu_counter_limited_add(fbc, limit, amount, + percpu_counter_batch); +} + /* * With percpu_counter_add_local() and percpu_counter_sub_local(), counts * are accumulated in local per cpu counter and not in fbc->count until @@ -185,6 +194,27 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount) local_irq_restore(flags); } +static inline bool +percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) +{ + unsigned long flags; + bool good = false; + s64 count; + + if (amount == 0) + return true; + + local_irq_save(flags); + count = fbc->count + amount; + if ((amount > 0 && count <= limit) || + (amount < 0 && count >= limit)) { + fbc->count = count; + good = true; + } + local_irq_restore(flags); + return good; +} + /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */ static inline void percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 143fbc10ecfe..b3b34f6670cf 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -60,12 +60,6 @@ struct pmu_hw_events { DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS); /* - * Hardware lock to serialize accesses to PMU registers. Needed for the - * read/modify/write sequences. - */ - raw_spinlock_t pmu_lock; - - /* * When using percpu IRQs, we need a percpu dev_id. Place it here as we * already have to allocate this struct per cpu. */ @@ -189,4 +183,26 @@ void armpmu_free_irq(int irq, int cpu); #define ARMV8_SPE_PDEV_NAME "arm,spe-v1" #define ARMV8_TRBE_PDEV_NAME "arm,trbe" +/* Why does everything I do descend into this? */ +#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ + (lo) == (hi) ? 
#cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi + +#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ + __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) + +#define GEN_PMU_FORMAT_ATTR(name) \ + PMU_FORMAT_ATTR(name, \ + _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \ + ATTR_CFG_FLD_##name##_LO, \ + ATTR_CFG_FLD_##name##_HI)) + +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \ + ((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0)) + +#define ATTR_CFG_GET_FLD(attr, name) \ + _ATTR_CFG_GET_FLD(attr, \ + ATTR_CFG_FLD_##name##_CFG, \ + ATTR_CFG_FLD_##name##_LO, \ + ATTR_CFG_FLD_##name##_HI) + #endif /* __ARM_PMU_H__ */ diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index e3899bd77f5c..46377e134d67 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -215,45 +215,54 @@ #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ #define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ -#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ -#define ARMV8_PMU_PMCR_N_MASK 0x1f -#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ +#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */ +/* Mask for writable bits */ +#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \ + ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \ + ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \ + ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP) /* * PMOVSR: counters overflow flag status reg */ -#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ -#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK +#define ARMV8_PMU_OVSR_P GENMASK(30, 0) +#define ARMV8_PMU_OVSR_C BIT(31) +/* Mask for writable bits is both P and C fields */ +#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C) /* * PMXEVTYPER: Event selection reg */ -#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ -#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ +#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */ +#define ARMV8_PMU_EVTYPE_TH GENMASK_ULL(43, 32) /* arm64 only */ +#define ARMV8_PMU_EVTYPE_TC GENMASK_ULL(63, 61) /* arm64 only */ /* * Event filters for PMUv3 */ -#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) -#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) -#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) +#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) +#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) +#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29) +#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28) +#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) +#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26) /* * PMUSERENR: user enable reg */ -#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ #define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ +/* Mask for writable bits */ +#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \ + ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER) /* PMMIR_EL1.SLOTS mask */ -#define ARMV8_PMU_SLOTS_MASK 0xff - -#define ARMV8_PMU_BUS_SLOTS_SHIFT 8 -#define ARMV8_PMU_BUS_SLOTS_MASK 0xff -#define ARMV8_PMU_BUS_WIDTH_SHIFT 16 -#define ARMV8_PMU_BUS_WIDTH_MASK 0xf +#define ARMV8_PMU_SLOTS GENMASK(7, 0) +#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8) +#define ARMV8_PMU_BUS_WIDTH 
GENMASK(19, 16) +#define ARMV8_PMU_THWIDTH GENMASK(23, 20) diff --git a/include/linux/perf_event.h index e85cd1c0eaf3..d2a15c0c6f8a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -704,6 +704,7 @@ struct perf_event { /* The cumulative AND of all event_caps for events in this group. */ int group_caps; + unsigned int group_generation; struct perf_event *group_leader; /* * event->pmu will always point to pmu in which this event belongs. @@ -842,11 +843,11 @@ struct perf_event { }; /* - * ,-----------------------[1:n]----------------------. - * V V - * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event - * ^ ^ | | - * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-' + * ,-----------------------[1:n]------------------------. + * V V + * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event + * | | + * `--[n:1]-> pmu <-[1:n]--' * * * struct perf_event_pmu_context lifetime is refcount based and RCU freed @@ -864,6 +865,9 @@ struct perf_event { * ctx->mutex pinning the configuration. Since we hold a reference on * group_leader (through the filedesc) it can't go away, therefore it's * associated pmu_ctx must exist and cannot change due to ctx->mutex. + * + * perf_event holds a refcount on perf_event_context + * perf_event holds a refcount on perf_event_pmu_context */ struct perf_event_pmu_context { struct pmu *pmu; @@ -878,6 +882,7 @@ struct perf_event_pmu_context { unsigned int embedded : 1; unsigned int nr_events; + unsigned int nr_cgroups; atomic_t refcount; /* event <-> epc */ struct rcu_head rcu_head; @@ -1138,6 +1143,15 @@ static inline bool branch_sample_priv(const struct perf_event *event) return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE; } +static inline bool branch_sample_counters(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; +} + +static inline bool branch_sample_call_stack(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; +} struct perf_sample_data { /* @@ -1172,6 +1186,7 @@ struct perf_sample_data { struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; @@ -1249,7 +1264,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data, static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, - struct perf_branch_stack *brs) + struct perf_branch_stack *brs, + u64 *brs_cntr) { int size = sizeof(u64); /* nr */ @@ -1257,7 +1273,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data, size += sizeof(u64); size += brs->nr * sizeof(struct perf_branch_entry); + /* + * The extension space for counters is appended after the + * struct perf_branch_stack. It is used to store the occurrences + * of events of each branch.
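The percpu_counter_limited_add() pair added a little further up is worth a closer look: the !CONFIG_SMP fallback shows the whole contract in a few lines, applying @amount only when the result stays on the correct side of @limit for the sign of the addition. A minimal user-space sketch of that same check; the struct and function names here are stand-ins, and the local_irq_save()/restore() bracketing is dropped since it has no user-space equivalent:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the UP struct percpu_counter: just the raw count. */
struct counter { int64_t count; };

/*
 * Same logic as the !CONFIG_SMP percpu_counter_limited_add() above:
 * @limit acts as an upper bound for additions and a lower bound for
 * subtractions; the add is applied only if the result respects it.
 */
static bool counter_limited_add(struct counter *c, int64_t limit, int64_t amount)
{
	int64_t count;

	if (amount == 0)
		return true;

	count = c->count + amount;
	if ((amount > 0 && count <= limit) ||
	    (amount < 0 && count >= limit)) {
		c->count = count;
		return true;
	}
	return false;
}

int main(void)
{
	struct counter c = { .count = 8 };

	printf("%d\n", counter_limited_add(&c, 10, 2));  /* 1: 8 + 2 <= 10 */
	printf("%d\n", counter_limited_add(&c, 10, 1));  /* 0: 10 + 1 > 10 */
	printf("%d\n", counter_limited_add(&c, 0, -10)); /* 1: 10 - 10 >= 0 */
	return 0;
}

The SMP variant declared alongside it (__percpu_counter_limited_add) takes the usual @batch argument so it can leave slack in the per-cpu counters before falling back to a precise sum.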
+ */ + if (brs_cntr) + size += brs->nr * sizeof(u64); + data->br_stack = brs; + data->br_stack_cntr = brs_cntr; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; } @@ -1573,7 +1598,7 @@ extern int sysctl_perf_cpu_time_max_percent; extern void perf_sample_event_took(u64 sample_len_ns); -int perf_proc_update_handler(struct ctl_table *table, int write, +int perf_event_max_sample_rate_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 1fba072b3dac..f6d0e3513948 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -184,6 +184,13 @@ static inline int pmd_young(pmd_t pmd) } #endif +#ifndef pmd_dirty +static inline int pmd_dirty(pmd_t pmd) +{ + return 0; +} +#endif + /* * A facility to provide lazy MMU batching. This allows PTE updates and * page invalidations to be delayed until a call to leave lazy MMU mode @@ -206,6 +213,14 @@ static inline int pmd_young(pmd_t pmd) #endif #ifndef set_ptes + +#ifndef pte_next_pfn +static inline pte_t pte_next_pfn(pte_t pte) +{ + return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); +} +#endif + /** * set_ptes - Map consecutive pages to a contiguous range of addresses. * @mm: Address space to map the pages into. @@ -231,7 +246,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, if (--nr == 0) break; ptep++; - pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); + pte = pte_next_pfn(pte); } arch_leave_lazy_mmu_mode(); } @@ -284,6 +299,27 @@ static inline pmd_t pmdp_get(pmd_t *pmdp) } #endif +#ifndef pudp_get +static inline pud_t pudp_get(pud_t *pudp) +{ + return READ_ONCE(*pudp); +} +#endif + +#ifndef p4dp_get +static inline p4d_t p4dp_get(p4d_t *p4dp) +{ + return READ_ONCE(*p4dp); +} +#endif + +#ifndef pgdp_get +static inline pgd_t pgdp_get(pgd_t *pgdp) +{ + return READ_ONCE(*pgdp); +} +#endif + #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, @@ -367,7 +403,7 @@ static inline bool arch_has_hw_nonleaf_pmd_young(void) */ static inline bool arch_has_hw_pte_young(void) { - return false; + return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG); } #endif diff --git a/include/linux/phy.h b/include/linux/phy.h index 1351b802ffcf..684efaeca07c 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -327,7 +327,8 @@ struct mdio_bus_stats { /** * struct phy_package_shared - Shared information in PHY packages - * @addr: Common PHY address used to combine PHYs in one package + * @base_addr: Base PHY address of PHY package used to combine PHYs + * in one package and for offset calculation of phy_package_read/write * @refcnt: Number of PHYs connected to this shared data * @flags: Initialization of PHY package * @priv_size: Size of the shared private data @priv @@ -338,7 +339,7 @@ struct mdio_bus_stats { * phy_package_leave(). */ struct phy_package_shared { - int addr; + u8 base_addr; refcount_t refcnt; unsigned long flags; size_t priv_size; @@ -568,7 +569,6 @@ struct macsec_ops; * - Bits [31:24] are reserved for defining generic * PHY driver behavior. 
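The pte_next_pfn() hook added in the pgtable.h hunk above exists so that set_ptes() can advance to the following page's pte without knowing the architecture's pte layout; the generic fallback simply bumps the pfn field by one. A toy user-space rendering of that batching loop, assuming a made-up pte layout with the pfn at bit 12 (this PFN_PTE_SHIFT value and the 4-entry table are inventions of the sketch, and the lazy-MMU bracketing is elided):

#include <stdint.h>
#include <stdio.h>

/* Toy pte layout: pfn stored starting at bit 12. */
#define PFN_PTE_SHIFT 12
typedef struct { uint64_t val; } pte_t;

/* Generic pte_next_pfn(): advance to the pte of the following page. */
static pte_t pte_next_pfn(pte_t pte)
{
	return (pte_t){ pte.val + (1ULL << PFN_PTE_SHIFT) };
}

/* Shape of the set_ptes() loop above: write nr consecutive ptes. */
static void set_ptes(pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		*ptep = pte;
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_next_pfn(pte);
	}
}

int main(void)
{
	pte_t table[4];

	set_ptes(table, (pte_t){ 100ULL << PFN_PTE_SHIFT }, 4);
	for (int i = 0; i < 4; i++)
		printf("pte %d -> pfn %llu\n", i,
		       (unsigned long long)(table[i].val >> PFN_PTE_SHIFT));
	return 0;
}

An architecture whose ptes do not encode the pfn as a plain shifted field supplies its own pte_next_pfn(), which is the whole point of the override.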
* @irq: IRQ number of the PHY's interrupt (-1 if none) - * @phy_timer: The timer for handling the state machine * @phylink: Pointer to phylink instance for this PHY * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached * @sfp_bus: SFP bus attached to this PHY's fiber port @@ -605,6 +605,8 @@ struct macsec_ops; * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended, * requiring a rerun of the interrupt handler after resume * @interface: enum phy_interface_t value + * @possible_interfaces: bitmap of interface modes that the attached PHY + * will switch between depending on media speed. * @skb: Netlink message for cable diagnostics * @nest: Netlink nest used for cable diagnostics * @ehdr: Netlink header for cable diagnostics @@ -674,6 +676,7 @@ struct phy_device { u32 dev_flags; phy_interface_t interface; + DECLARE_PHY_INTERFACE_MASK(possible_interfaces); /* * forced speed & duplex (no autoneg) @@ -1560,9 +1563,11 @@ static inline bool phy_has_txtstamp(struct phy_device *phydev) return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp; } -static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) +static inline int phy_hwtstamp(struct phy_device *phydev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { - return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr); + return phydev->mii_ts->hwtstamp(phydev->mii_ts, cfg, extack); } static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb, @@ -1736,6 +1741,7 @@ void phy_detach(struct phy_device *phydev); void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_config_aneg(struct phy_device *phydev); +int _phy_start_aneg(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); int phy_speed_down(struct phy_device *phydev, bool sync); @@ -1860,6 +1866,7 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); +int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev); int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev); int genphy_c45_read_eee_abilities(struct phy_device *phydev); int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev); @@ -1969,10 +1976,10 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev, int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); int phy_ethtool_nway_reset(struct net_device *ndev); -int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size); +int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size); void phy_package_leave(struct phy_device *phydev); int devm_phy_package_join(struct device *dev, struct phy_device *phydev, - int addr, size_t priv_size); + int base_addr, size_t priv_size); int __init mdio_bus_init(void); void mdio_bus_exit(void); @@ -1995,48 +2002,83 @@ int __phy_hwtstamp_set(struct phy_device *phydev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack); -static inline int phy_package_read(struct phy_device *phydev, u32 regnum) +static inline int phy_package_address(struct phy_device *phydev, + unsigned int addr_offset) { struct phy_package_shared *shared = phydev->shared; + u8 base_addr = shared->base_addr; - if (!shared) + if (addr_offset >= PHY_MAX_ADDR - base_addr) return
-EIO; - return mdiobus_read(phydev->mdio.bus, shared->addr, regnum); + /* we know that addr will be in the range 0..31 and thus the + * implicit cast to a signed int is not a problem. + */ + return base_addr + addr_offset; } -static inline int __phy_package_read(struct phy_device *phydev, u32 regnum) +static inline int phy_package_read(struct phy_device *phydev, + unsigned int addr_offset, u32 regnum) { - struct phy_package_shared *shared = phydev->shared; + int addr = phy_package_address(phydev, addr_offset); - if (!shared) - return -EIO; + if (addr < 0) + return addr; + + return mdiobus_read(phydev->mdio.bus, addr, regnum); +} + +static inline int __phy_package_read(struct phy_device *phydev, + unsigned int addr_offset, u32 regnum) +{ + int addr = phy_package_address(phydev, addr_offset); + + if (addr < 0) + return addr; - return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum); + return __mdiobus_read(phydev->mdio.bus, addr, regnum); } static inline int phy_package_write(struct phy_device *phydev, - u32 regnum, u16 val) + unsigned int addr_offset, u32 regnum, + u16 val) { - struct phy_package_shared *shared = phydev->shared; + int addr = phy_package_address(phydev, addr_offset); - if (!shared) - return -EIO; + if (addr < 0) + return addr; - return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); + return mdiobus_write(phydev->mdio.bus, addr, regnum, val); } static inline int __phy_package_write(struct phy_device *phydev, - u32 regnum, u16 val) + unsigned int addr_offset, u32 regnum, + u16 val) { - struct phy_package_shared *shared = phydev->shared; + int addr = phy_package_address(phydev, addr_offset); - if (!shared) - return -EIO; + if (addr < 0) + return addr; - return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); + return __mdiobus_write(phydev->mdio.bus, addr, regnum, val); } +int __phy_package_read_mmd(struct phy_device *phydev, + unsigned int addr_offset, int devad, + u32 regnum); + +int phy_package_read_mmd(struct phy_device *phydev, + unsigned int addr_offset, int devad, + u32 regnum); + +int __phy_package_write_mmd(struct phy_device *phydev, + unsigned int addr_offset, int devad, + u32 regnum, u16 val); + +int phy_package_write_mmd(struct phy_device *phydev, + unsigned int addr_offset, int devad, + u32 regnum, u16 val); + static inline bool __phy_package_set_once(struct phy_device *phydev, unsigned int b) { diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 7d07f8736431..d589f89c612c 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -99,72 +99,6 @@ static inline bool phylink_autoneg_inband(unsigned int mode) } /** - * phylink_pcs_neg_mode() - helper to determine PCS inband mode - * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. - * @interface: interface mode to be used - * @advertising: advertisement ethtool link mode mask - * - * Determines the negotiation mode to be used by the PCS, and returns - * one of: - * - * - %PHYLINK_PCS_NEG_NONE: interface mode does not support inband - * - %PHYLINK_PCS_NEG_OUTBAND: an out of band mode (e.g. reading the PHY) - * will be used. - * - %PHYLINK_PCS_NEG_INBAND_DISABLED: inband mode selected but autoneg - * disabled - * - %PHYLINK_PCS_NEG_INBAND_ENABLED: inband mode selected and autoneg enabled - * - * Note: this is for cases where the PCS itself is involved in negotiation - * (e.g. Clause 37, SGMII and similar) not Clause 73.
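All of the phy_package_{read,write}() accessors above now funnel through phy_package_address(), so the offset validation lives in exactly one place. That check is easy to exercise stand-alone; PHY_MAX_ADDR is 32 in the kernel, and user-space errno.h supplies EIO here:

#include <errno.h>
#include <stdio.h>

#define PHY_MAX_ADDR 32

/*
 * Same bounds check as the new phy_package_address() above: @addr_offset
 * is relative to the package base address, and the resulting address must
 * stay below PHY_MAX_ADDR.
 */
static int phy_package_address(unsigned char base_addr, unsigned int addr_offset)
{
	if (addr_offset >= PHY_MAX_ADDR - base_addr)
		return -EIO;

	/* result is in 0..31, so the implicit widening to int is fine */
	return base_addr + addr_offset;
}

int main(void)
{
	printf("%d\n", phy_package_address(28, 3)); /* 31: last valid address */
	printf("%d\n", phy_package_address(28, 4)); /* -5 (-EIO): past the bus */
	return 0;
}

Because the helper returns int, error and address share one return channel, which is why every accessor above checks addr < 0 before touching the MDIO bus.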
- */ -static inline unsigned int phylink_pcs_neg_mode(unsigned int mode, - phy_interface_t interface, - const unsigned long *advertising) -{ - unsigned int neg_mode; - - switch (interface) { - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_QSGMII: - case PHY_INTERFACE_MODE_QUSGMII: - case PHY_INTERFACE_MODE_USXGMII: - /* These protocols are designed for use with a PHY which - * communicates its negotiation result back to the MAC via - * inband communication. Note: there exist PHYs that run - * with SGMII but do not send the inband data. - */ - if (!phylink_autoneg_inband(mode)) - neg_mode = PHYLINK_PCS_NEG_OUTBAND; - else - neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED; - break; - - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - /* 1000base-X is designed for use media-side for Fibre - * connections, and thus the Autoneg bit needs to be - * taken into account. We also do this for 2500base-X - * as well, but drivers may not support this, so may - * need to override this. - */ - if (!phylink_autoneg_inband(mode)) - neg_mode = PHYLINK_PCS_NEG_OUTBAND; - else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - advertising)) - neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED; - else - neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED; - break; - - default: - neg_mode = PHYLINK_PCS_NEG_NONE; - break; - } - - return neg_mode; -} - -/** * struct phylink_link_state - link state structure * @advertising: ethtool bitmask containing advertised link modes * @lp_advertising: ethtool bitmask containing link partner advertised link @@ -227,7 +161,7 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); /** * struct phylink_mac_ops - MAC operations structure. - * @validate: Validate and update the link configuration. + * @mac_get_caps: Get MAC capabilities for interface mode. * @mac_select_pcs: Select a PCS for the interface mode. * @mac_prepare: prepare for a major reconfiguration of the interface. * @mac_config: configure the MAC for the selected mode and state. @@ -238,9 +172,8 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); * The individual methods are described more fully below. */ struct phylink_mac_ops { - void (*validate)(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state); + unsigned long (*mac_get_caps)(struct phylink_config *config, + phy_interface_t interface); struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config, phy_interface_t interface); int (*mac_prepare)(struct phylink_config *config, unsigned int mode, @@ -259,39 +192,17 @@ struct phylink_mac_ops { #if 0 /* For kernel-doc purposes only. */ /** - * validate - Validate and update the link configuration + * mac_get_caps: Get MAC capabilities for interface mode. * @config: a pointer to a &struct phylink_config. - * @supported: ethtool bitmask for supported link modes. - * @state: a pointer to a &struct phylink_link_state. - * - * Clear bits in the @supported and @state->advertising masks that - * are not supportable by the MAC. - * - * Note that the PHY may be able to transform from one connection - * technology to another, so, eg, don't clear 1000BaseX just - * because the MAC is unable to BaseX mode. This is more about - * clearing unsupported speeds and duplex settings. The port modes - * should not be cleared; phylink_set_port_modes() will help with this. 
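phylink_pcs_neg_mode() leaves the header here, but the removed body just above is a compact decision table worth restating. A reduced sketch of the same three-way split; the enum and interface names below are simplified stand-ins for the PHYLINK_PCS_NEG_* and PHY_INTERFACE_MODE_* constants, and only one representative of each interface class is kept:

#include <stdbool.h>
#include <stdio.h>

enum neg_mode { NEG_NONE, NEG_OUTBAND, NEG_INBAND_DISABLED, NEG_INBAND_ENABLED };
enum iface { IFACE_SGMII, IFACE_1000BASEX, IFACE_OTHER };

/* Mirrors the removed helper: inband-capable protocols vs. everything else. */
static enum neg_mode pcs_neg_mode(bool inband, enum iface iface,
				  bool aneg_advertised)
{
	switch (iface) {
	case IFACE_SGMII:
		/* PHY-side protocol: selecting inband implies autoneg on. */
		return inband ? NEG_INBAND_ENABLED : NEG_OUTBAND;
	case IFACE_1000BASEX:
		/* Media-side protocol: the advertised Autoneg bit decides. */
		if (!inband)
			return NEG_OUTBAND;
		return aneg_advertised ? NEG_INBAND_ENABLED
				       : NEG_INBAND_DISABLED;
	default:
		/* Interface mode does not support inband at all. */
		return NEG_NONE;
	}
}

int main(void)
{
	printf("%d\n", pcs_neg_mode(true, IFACE_SGMII, false));     /* 3 */
	printf("%d\n", pcs_neg_mode(true, IFACE_1000BASEX, false)); /* 2 */
	printf("%d\n", pcs_neg_mode(false, IFACE_OTHER, true));     /* 1 */
	return 0;
}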
- * - * When @config->supported_interfaces has been set, phylink will iterate - * over the supported interfaces to determine the full capability of the - * MAC. The validation function must not print errors if @state->interface - * is set to an unexpected value. + * @interface: PHY interface mode. * - * When @config->supported_interfaces is empty, phylink will call this - * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and - * expects the MAC driver to return all supported link modes. - * - * If the @state->interface mode is not supported, then the @supported - * mask must be cleared. - * - * This member is optional; if not set, the generic validator will be - * used making use of @config->mac_capabilities and - * @config->supported_interfaces to determine which link modes are - * supported. + * Optional method. When not provided, config->mac_capabilities will be used. + * When implemented, this returns the MAC capabilities for the specified + * interface mode where there is some special handling required by the MAC + * driver (e.g. not supporting half-duplex in certain interface modes.) */ -void validate(struct phylink_config *config, unsigned long *supported, - struct phylink_link_state *state); +unsigned long mac_get_caps(struct phylink_config *config, + phy_interface_t interface); /** * mac_select_pcs: Select a PCS for the interface mode. * @config: a pointer to a &struct phylink_config. @@ -600,7 +511,7 @@ void pcs_get_state(struct phylink_pcs *pcs, * * The %neg_mode argument should be tested via the phylink_mode_*() family of * functions, or for PCS that set pcs->neg_mode true, should be tested - * against the %PHYLINK_PCS_NEG_* definitions. + * against the PHYLINK_PCS_NEG_* definitions. */ int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, @@ -630,23 +541,12 @@ void pcs_an_restart(struct phylink_pcs *pcs); * * The %mode argument should be tested via the phylink_mode_*() family of * functions, or for PCS that set pcs->neg_mode true, should be tested - * against the %PHYLINK_PCS_NEG_* definitions. + * against the PHYLINK_PCS_NEG_* definitions. */ void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex); #endif -void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps); -unsigned long phylink_get_capabilities(phy_interface_t interface, - unsigned long mac_capabilities, - int rate_matching); -void phylink_validate_mask_caps(unsigned long *supported, - struct phylink_link_state *state, - unsigned long caps); -void phylink_generic_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state); - struct phylink *phylink_create(struct phylink_config *, const struct fwnode_handle *, phy_interface_t, diff --git a/include/linux/pid.h b/include/linux/pid.h index 653a527574c4..395cacce1179 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -2,18 +2,12 @@ #ifndef _LINUX_PID_H #define _LINUX_PID_H +#include <linux/pid_types.h> #include <linux/rculist.h> -#include <linux/wait.h> +#include <linux/rcupdate.h> #include <linux/refcount.h> - -enum pid_type -{ - PIDTYPE_PID, - PIDTYPE_TGID, - PIDTYPE_PGID, - PIDTYPE_SID, - PIDTYPE_MAX, -}; +#include <linux/sched.h> +#include <linux/wait.h> /* * What is struct pid? 
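A pattern note on the pid.h hunk that begins here: like plist.h further down in this series, the header is split so that the bare types (enum pid_type, the init_pid_ns forward declaration) move into a tiny <linux/pid_types.h>, while the API stays in pid.h. The payoff is include-graph hygiene: a header that only needs the type names no longer drags in the full pid machinery. The pattern in miniature, as one compilable file with wholly hypothetical names:

/* --- thing_types.h (hypothetical): types only, no API, no heavy includes */
enum thing_type { THING_A, THING_B, THING_MAX };
struct thing_ns;                      /* opaque: only pointers cross here */
extern struct thing_ns init_thing_ns; /* declaration; defined elsewhere */

/* --- thing.h (hypothetical): full API; would include thing_types.h ----- */
static inline int thing_id(enum thing_type type)
{
	return (int)type;
}

/* --- a consumer that needs only the types never includes thing.h ------- */
#include <stdio.h>

int main(void)
{
	printf("%d\n", thing_id(THING_B)); /* 1 */
	return 0;
}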
@@ -110,9 +104,6 @@ extern void exchange_tids(struct task_struct *task, struct task_struct *old); extern void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type); -struct pid_namespace; -extern struct pid_namespace init_pid_ns; - extern int pid_max; extern int pid_max_min, pid_max_max; @@ -215,4 +206,127 @@ pid_t pid_vnr(struct pid *pid); } \ task = tg___; \ } while_each_pid_task(pid, type, task) + +static inline struct pid *task_pid(struct task_struct *task) +{ + return task->thread_pid; +} + +/* + * the helpers to get the task's different pids as they are seen + * from various namespaces + * + * task_xid_nr() : global id, i.e. the id seen from the init namespace; + * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of + * current. + * task_xid_nr_ns() : id seen from the ns specified; + * + * see also pid_nr() etc in include/linux/pid.h + */ +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); + +static inline pid_t task_pid_nr(struct task_struct *tsk) +{ + return tsk->pid; +} + +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); +} + +static inline pid_t task_pid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); +} + + +static inline pid_t task_tgid_nr(struct task_struct *tsk) +{ + return tsk->tgid; +} + +/** + * pid_alive - check that a task structure is not stale + * @p: Task structure to be checked. + * + * Test if a process is not yet dead (at most zombie state) + * If pid_alive fails, then pointers within the task structure + * can be stale and must not be dereferenced. + * + * Return: 1 if the process is alive. 0 otherwise. + */ +static inline int pid_alive(const struct task_struct *p) +{ + return p->thread_pid != NULL; +} + +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); +} + +static inline pid_t task_pgrp_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); +} + + +static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); +} + +static inline pid_t task_session_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); +} + +static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); +} + +static inline pid_t task_tgid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); +} + +static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) +{ + pid_t pid = 0; + + rcu_read_lock(); + if (pid_alive(tsk)) + pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); + rcu_read_unlock(); + + return pid; +} + +static inline pid_t task_ppid_nr(const struct task_struct *tsk) +{ + return task_ppid_nr_ns(tsk, &init_pid_ns); +} + +/* Obsolete, do not use: */ +static inline pid_t task_pgrp_nr(struct task_struct *tsk) +{ + return task_pgrp_nr_ns(tsk, &init_pid_ns); +} + +/** + * is_global_init - check if a task structure is init. Since init + * is free to have sub-threads we need to check tgid. + * @tsk: Task structure to be checked. + * + * Check if a task structure is the first user space task the kernel created. + * + * Return: 1 if the task structure is init. 0 otherwise. 
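The task_xid_nr()/task_xid_vnr() naming scheme documented in the comment block above maps onto something observable from user space: a process in a child pid namespace has one id per namespace it is visible in. A sketch using clone(2) with CLONE_NEWPID; it is Linux-specific and needs CAP_SYS_ADMIN to run:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static int child(void *arg)
{
	(void)arg;
	/* The "vnr" view: inside the fresh pid namespace this prints 1. */
	printf("child sees itself as pid %d\n", getpid());
	return 0;
}

int main(void)
{
	static char stack[64 * 1024];

	/* clone() returns the "nr" view: the child's id in our namespace. */
	pid_t global = clone(child, stack + sizeof(stack),
			     CLONE_NEWPID | SIGCHLD, NULL);
	if (global < 0) {
		perror("clone (CLONE_NEWPID needs CAP_SYS_ADMIN)");
		return 1;
	}
	printf("parent sees the child as pid %d\n", (int)global);
	waitpid(global, NULL, 0);
	return 0;
}

The child's getpid() corresponds to what task_pid_vnr() reports for it, while the clone() return value is what task_pid_nr_ns() would yield in the parent's namespace.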
+ */ +static inline int is_global_init(struct task_struct *tsk) +{ + return task_tgid_nr(tsk) == 1; +} + #endif /* _LINUX_PID_H */ diff --git a/include/linux/pid_types.h b/include/linux/pid_types.h new file mode 100644 index 000000000000..c2aee1d91dcf --- /dev/null +++ b/include/linux/pid_types.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PID_TYPES_H +#define _LINUX_PID_TYPES_H + +enum pid_type { + PIDTYPE_PID, + PIDTYPE_TGID, + PIDTYPE_PGID, + PIDTYPE_SID, + PIDTYPE_MAX, +}; + +struct pid_namespace; +extern struct pid_namespace init_pid_ns; + +#endif /* _LINUX_PID_TYPES_H */ diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 4729d54e8995..73de70362b98 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -17,6 +17,7 @@ #include <linux/pinctrl/pinctrl-state.h> struct device; +struct gpio_chip; /* This struct is private to the core and should be regarded as a cookie */ struct pinctrl; @@ -25,27 +26,30 @@ struct pinctrl_state; #ifdef CONFIG_PINCTRL /* External interface to pin control */ -extern bool pinctrl_gpio_can_use_line(unsigned gpio); -extern int pinctrl_gpio_request(unsigned gpio); -extern void pinctrl_gpio_free(unsigned gpio); -extern int pinctrl_gpio_direction_input(unsigned gpio); -extern int pinctrl_gpio_direction_output(unsigned gpio); -extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config); - -extern struct pinctrl * __must_check pinctrl_get(struct device *dev); -extern void pinctrl_put(struct pinctrl *p); -extern struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p, - const char *name); -extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); - -extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev); -extern void devm_pinctrl_put(struct pinctrl *p); -extern int pinctrl_select_default_state(struct device *dev); +bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset); +int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset); +void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset); +int pinctrl_gpio_direction_input(struct gpio_chip *gc, + unsigned int offset); +int pinctrl_gpio_direction_output(struct gpio_chip *gc, + unsigned int offset); +int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config); + +struct pinctrl * __must_check pinctrl_get(struct device *dev); +void pinctrl_put(struct pinctrl *p); +struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p, + const char *name); +int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); + +struct pinctrl * __must_check devm_pinctrl_get(struct device *dev); +void devm_pinctrl_put(struct pinctrl *p); +int pinctrl_select_default_state(struct device *dev); #ifdef CONFIG_PM -extern int pinctrl_pm_select_default_state(struct device *dev); -extern int pinctrl_pm_select_sleep_state(struct device *dev); -extern int pinctrl_pm_select_idle_state(struct device *dev); +int pinctrl_pm_select_default_state(struct device *dev); +int pinctrl_pm_select_sleep_state(struct device *dev); +int pinctrl_pm_select_idle_state(struct device *dev); #else static inline int pinctrl_pm_select_default_state(struct device *dev) { @@ -63,31 +67,38 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev) #else /* !CONFIG_PINCTRL */ -static inline bool pinctrl_gpio_can_use_line(unsigned gpio) +static inline bool +pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned 
int offset) { return true; } -static inline int pinctrl_gpio_request(unsigned gpio) +static inline int +pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset) { return 0; } -static inline void pinctrl_gpio_free(unsigned gpio) +static inline void +pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset) { } -static inline int pinctrl_gpio_direction_input(unsigned gpio) +static inline int +pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { return 0; } -static inline int pinctrl_gpio_direction_output(unsigned gpio) +static inline int +pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset) { return 0; } -static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config) +static inline int +pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config) { return 0; } diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h index 0639b36f43c5..673e96df453b 100644 --- a/include/linux/pinctrl/machine.h +++ b/include/linux/pinctrl/machine.h @@ -11,7 +11,7 @@ #ifndef __LINUX_PINCTRL_MACHINE_H #define __LINUX_PINCTRL_MACHINE_H -#include <linux/kernel.h> /* ARRAY_SIZE() */ +#include <linux/array_size.h> #include <linux/pinctrl/pinctrl-state.h> @@ -47,7 +47,7 @@ struct pinctrl_map_mux { struct pinctrl_map_configs { const char *group_or_pin; unsigned long *configs; - unsigned num_configs; + unsigned int num_configs; }; /** @@ -154,13 +154,13 @@ struct pinctrl_map; #ifdef CONFIG_PINCTRL extern int pinctrl_register_mappings(const struct pinctrl_map *map, - unsigned num_maps); + unsigned int num_maps); extern void pinctrl_unregister_mappings(const struct pinctrl_map *map); extern void pinctrl_provide_dummies(void); #else static inline int pinctrl_register_mappings(const struct pinctrl_map *map, - unsigned num_maps) + unsigned int num_maps) { return 0; } diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index d74b7a4ea154..a65d3d078e58 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -193,17 +193,17 @@ struct pinconf_generic_params { int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, - unsigned *reserved_maps, unsigned *num_maps, + unsigned int *reserved_maps, unsigned int *num_maps, enum pinctrl_map_type type); int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, - unsigned *num_maps, enum pinctrl_map_type type); + unsigned int *num_maps, enum pinctrl_map_type type); void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev, - struct pinctrl_map *map, unsigned num_maps); + struct pinctrl_map *map, unsigned int num_maps); static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, - unsigned *num_maps) + unsigned int *num_maps) { return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, PIN_MAP_TYPE_CONFIGS_GROUP); @@ -211,7 +211,7 @@ static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctld static inline int pinconf_generic_dt_node_to_map_pin(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, - unsigned *num_maps) + unsigned int *num_maps) { return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, PIN_MAP_TYPE_CONFIGS_PIN); diff --git a/include/linux/pinctrl/pinconf.h 
b/include/linux/pinctrl/pinconf.h index f8a8215e9021..770ec2221156 100644 --- a/include/linux/pinctrl/pinconf.h +++ b/include/linux/pinctrl/pinconf.h @@ -40,25 +40,25 @@ struct pinconf_ops { bool is_generic; #endif int (*pin_config_get) (struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *config); int (*pin_config_set) (struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs); + unsigned int num_configs); int (*pin_config_group_get) (struct pinctrl_dev *pctldev, - unsigned selector, + unsigned int selector, unsigned long *config); int (*pin_config_group_set) (struct pinctrl_dev *pctldev, - unsigned selector, + unsigned int selector, unsigned long *configs, - unsigned num_configs); + unsigned int num_configs); void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, - unsigned offset); + unsigned int offset); void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, - unsigned selector); + unsigned int selector); void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, unsigned long config); diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 4d252ea00ed1..9a8189ffd0f2 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -54,7 +54,7 @@ struct pingroup { * @drv_data: driver-defined per-pin data. pinctrl core does not touch this */ struct pinctrl_pin_desc { - unsigned number; + unsigned int number; const char *name; void *drv_data; }; @@ -82,7 +82,7 @@ struct pinctrl_gpio_range { unsigned int base; unsigned int pin_base; unsigned int npins; - unsigned const *pins; + unsigned int const *pins; struct gpio_chip *gc; }; @@ -108,18 +108,18 @@ struct pinctrl_gpio_range { struct pinctrl_ops { int (*get_groups_count) (struct pinctrl_dev *pctldev); const char *(*get_group_name) (struct pinctrl_dev *pctldev, - unsigned selector); + unsigned int selector); int (*get_group_pins) (struct pinctrl_dev *pctldev, - unsigned selector, - const unsigned **pins, - unsigned *num_pins); + unsigned int selector, + const unsigned int **pins, + unsigned int *num_pins); void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, - unsigned offset); + unsigned int offset); int (*dt_node_to_map) (struct pinctrl_dev *pctldev, struct device_node *np_config, - struct pinctrl_map **map, unsigned *num_maps); + struct pinctrl_map **map, unsigned int *num_maps); void (*dt_free_map) (struct pinctrl_dev *pctldev, - struct pinctrl_map *map, unsigned num_maps); + struct pinctrl_map *map, unsigned int num_maps); }; /** @@ -193,7 +193,7 @@ extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *ranges, - unsigned nranges); + unsigned int nranges); extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); @@ -203,8 +203,8 @@ extern struct pinctrl_gpio_range * pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev, unsigned int pin); extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, - const char *pin_group, const unsigned **pins, - unsigned *num_pins); + const char *pin_group, const unsigned int **pins, + unsigned int *num_pins); /** * struct pinfunction - Description about a function diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h index a7e370965c53..d6f7b58d6ad0 100644 --- 
a/include/linux/pinctrl/pinmux.h +++ b/include/linux/pinctrl/pinmux.h @@ -57,26 +57,26 @@ struct pinctrl_gpio_range; * the pin request. */ struct pinmux_ops { - int (*request) (struct pinctrl_dev *pctldev, unsigned offset); - int (*free) (struct pinctrl_dev *pctldev, unsigned offset); + int (*request) (struct pinctrl_dev *pctldev, unsigned int offset); + int (*free) (struct pinctrl_dev *pctldev, unsigned int offset); int (*get_functions_count) (struct pinctrl_dev *pctldev); const char *(*get_function_name) (struct pinctrl_dev *pctldev, - unsigned selector); + unsigned int selector); int (*get_function_groups) (struct pinctrl_dev *pctldev, - unsigned selector, - const char * const **groups, - unsigned *num_groups); - int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector, - unsigned group_selector); + unsigned int selector, + const char * const **groups, + unsigned int *num_groups); + int (*set_mux) (struct pinctrl_dev *pctldev, unsigned int func_selector, + unsigned int group_selector); int (*gpio_request_enable) (struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, - unsigned offset); + unsigned int offset); void (*gpio_disable_free) (struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, - unsigned offset); + unsigned int offset); int (*gpio_set_direction) (struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, - unsigned offset, + unsigned int offset, bool input); bool strict; }; diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 608a9eb86bff..8ff23bf5a819 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -62,9 +62,6 @@ struct pipe_inode_info { unsigned int tail; unsigned int max_usage; unsigned int ring_size; -#ifdef CONFIG_WATCH_QUEUE - bool note_loss; -#endif unsigned int nr_accounted; unsigned int readers; unsigned int writers; @@ -72,6 +69,9 @@ struct pipe_inode_info { unsigned int r_counter; unsigned int w_counter; bool poll_usage; +#ifdef CONFIG_WATCH_QUEUE + bool note_loss; +#endif struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; @@ -125,6 +125,22 @@ struct pipe_buf_operations { }; /** + * pipe_has_watch_queue - Check whether the pipe is a watch_queue, + * i.e. it was created with O_NOTIFICATION_PIPE + * @pipe: The pipe to check + * + * Return: true if pipe is a watch queue, false otherwise. 
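pipe_has_watch_queue() above is the standard idiom for a config-gated field: the #ifdef lives once in the accessor, the field only exists when CONFIG_WATCH_QUEUE is set, and callers stay clean because the predicate folds to a constant false otherwise. The shape of it, with a mock config switch standing in for the Kconfig option:

#include <stdbool.h>
#include <stdio.h>

/* Flip to 1 to simulate CONFIG_WATCH_QUEUE=y. */
#define MY_CONFIG_WATCH_QUEUE 0

struct pipe_info {
#if MY_CONFIG_WATCH_QUEUE
	void *watch_queue;
#endif
	unsigned int head, tail;
};

/*
 * Same shape as pipe_has_watch_queue() above: when the option is off the
 * field does not exist, so the predicate becomes a constant false and no
 * call site needs an #ifdef of its own.
 */
static inline bool pipe_has_watch_queue(const struct pipe_info *pipe)
{
#if MY_CONFIG_WATCH_QUEUE
	return pipe->watch_queue != NULL;
#else
	(void)pipe;
	return false;
#endif
}

int main(void)
{
	struct pipe_info p = { .head = 0, .tail = 0 };

	printf("%d\n", pipe_has_watch_queue(&p)); /* 0 in this configuration */
	return 0;
}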
+ */ +static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe) +{ +#ifdef CONFIG_WATCH_QUEUE + return pipe->watch_queue != NULL; +#else + return false; +#endif +} + +/** * pipe_empty - Return true if the pipe is empty * @head: The pipe ring head pointer * @tail: The pipe ring tail pointer diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 80cb00db42a4..79594aeb160d 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h @@ -154,7 +154,9 @@ struct packet_stacked_data struct pktcdvd_device { - struct block_device *bdev; /* dev attached */ + struct bdev_handle *bdev_handle; /* dev attached */ + /* handle acquired for bdev during pkt_open_dev() */ + struct bdev_handle *open_bdev_handle; dev_t pkt_dev; /* our dev */ struct packet_settings settings; struct packet_stats stats; diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index ab721cf13a98..7dae17b62a4d 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4436,8 +4436,20 @@ struct ec_response_i2c_passthru_protect { * These commands are for sending and receiving message via HDMI CEC */ +#define EC_CEC_MAX_PORTS 16 + #define MAX_CEC_MSG_LEN 16 +/* + * Helper macros for packing/unpacking cec_events. + * bits[27:0] : bitmask of events from enum mkbp_cec_event + * bits[31:28]: port number + */ +#define EC_MKBP_EVENT_CEC_PACK(events, port) \ + (((events) & GENMASK(27, 0)) | (((port) & 0xf) << 28)) +#define EC_MKBP_EVENT_CEC_GET_EVENTS(event) ((event) & GENMASK(27, 0)) +#define EC_MKBP_EVENT_CEC_GET_PORT(event) (((event) >> 28) & 0xf) + /* CEC message from the AP to be written on the CEC bus */ #define EC_CMD_CEC_WRITE_MSG 0x00B8 @@ -4449,19 +4461,54 @@ struct ec_params_cec_write { uint8_t msg[MAX_CEC_MSG_LEN]; } __ec_align1; +/** + * struct ec_params_cec_write_v1 - Message to write to the CEC bus + * @port: CEC port to write the message on + * @msg_len: length of msg in bytes + * @msg: message content to write to the CEC bus + */ +struct ec_params_cec_write_v1 { + uint8_t port; + uint8_t msg_len; + uint8_t msg[MAX_CEC_MSG_LEN]; +} __ec_align1; + +/* CEC message read from a CEC bus reported back to the AP */ +#define EC_CMD_CEC_READ_MSG 0x00B9 + +/** + * struct ec_params_cec_read - Read a message from the CEC bus + * @port: CEC port to read a message on + */ +struct ec_params_cec_read { + uint8_t port; +} __ec_align1; + +/** + * struct ec_response_cec_read - Message read from the CEC bus + * @msg_len: length of msg in bytes + * @msg: message content read from the CEC bus + */ +struct ec_response_cec_read { + uint8_t msg_len; + uint8_t msg[MAX_CEC_MSG_LEN]; +} __ec_align1; + /* Set various CEC parameters */ #define EC_CMD_CEC_SET 0x00BA /** * struct ec_params_cec_set - CEC parameters set * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + * @port: CEC port to set the parameter on * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC * or 1 to enable CEC functionality, in case cmd is * CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical * address between 0 and 15 or 0xff to unregister */ struct ec_params_cec_set { - uint8_t cmd; /* enum cec_command */ + uint8_t cmd : 4; /* enum cec_command */ + uint8_t port : 4; uint8_t val; } __ec_align1; @@ -4471,9 +4518,11 @@ struct ec_params_cec_set { /** * struct ec_params_cec_get - CEC parameters get * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + * @port: CEC 
port to get the parameter on */ struct ec_params_cec_get { - uint8_t cmd; /* enum cec_command */ + uint8_t cmd : 4; /* enum cec_command */ + uint8_t port : 4; } __ec_align1; /** @@ -4487,6 +4536,17 @@ struct ec_response_cec_get { uint8_t val; } __ec_align1; +/* Get the number of CEC ports */ +#define EC_CMD_CEC_PORT_COUNT 0x00C1 + +/** + * struct ec_response_cec_port_count - CEC port count response + * @port_count: number of CEC ports + */ +struct ec_response_cec_port_count { + uint8_t port_count; +} __ec_align1; + /* CEC parameters command */ enum cec_command { /* CEC reading, writing and events enable */ @@ -4501,6 +4561,8 @@ enum mkbp_cec_event { EC_MKBP_CEC_SEND_OK = BIT(0), /* Outgoing message was not acknowledged */ EC_MKBP_CEC_SEND_FAILED = BIT(1), + /* Incoming message can be read out by AP */ + EC_MKBP_CEC_HAVE_DATA = BIT(2), }; /*****************************************************************************/ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 4f9f756bc17c..8865e350c12a 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -258,7 +258,7 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature); int cros_ec_get_sensor_count(struct cros_ec_dev *ec); -int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata, +int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata, size_t outsize, void *indata, size_t insize); /** diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index f377817ce75c..cdd8cfb424f5 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -144,9 +144,6 @@ #define OMAP_MAX_GPIO_LINES 192 -#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) -#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) - #ifndef __ASSEMBLER__ struct omap_gpio_reg_offs { u16 revision; diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h index f2781aa7eff8..70e8a6bec0f6 100644 --- a/include/linux/platform_data/gsc_hwmon.h +++ b/include/linux/platform_data/gsc_hwmon.h @@ -40,6 +40,6 @@ struct gsc_hwmon_platform_data { unsigned int resolution; unsigned int vreference; unsigned int fan_base; - struct gsc_hwmon_channel channels[]; + struct gsc_hwmon_channel channels[] __counted_by(nchannels); }; #endif diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h index 2543c2a1c9ae..e2e895768311 100644 --- a/include/linux/platform_data/i2c-mux-reg.h +++ b/include/linux/platform_data/i2c-mux-reg.h @@ -17,7 +17,6 @@ * @n_values: Number of multiplexer channels * @little_endian: Indicating if the register is in little endian * @write_only: Reading the register is not allowed by hardware - * @classes: Optional I2C auto-detection classes * @idle: Value to write to mux when idle * @idle_in_use: indicate if idle value is in use * @reg: Virtual address of the register to switch channel @@ -30,7 +29,6 @@ struct i2c_mux_reg_platform_data { int n_values; bool little_endian; bool write_only; - const unsigned int *classes; u32 idle; bool idle_in_use; void __iomem *reg; diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h index 3e7c64c854f4..f3f1311cdf3a 100644 --- a/include/linux/platform_data/keypad-omap.h +++ b/include/linux/platform_data/keypad-omap.h @@ -19,9 +19,6 @@ struct 
omap_kp_platform_data { bool rep; unsigned long delay; bool dbounce; - /* specific to OMAP242x*/ - unsigned int *row_gpios; - unsigned int *col_gpios; }; /* Group (0..3) -- when multiple keys are pressed, only the diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h index ea1cc6d829e9..f177416635a2 100644 --- a/include/linux/platform_data/microchip-ksz.h +++ b/include/linux/platform_data/microchip-ksz.h @@ -20,10 +20,31 @@ #define __MICROCHIP_KSZ_H #include <linux/types.h> +#include <linux/platform_data/dsa.h> + +enum ksz_chip_id { + KSZ8563_CHIP_ID = 0x8563, + KSZ8795_CHIP_ID = 0x8795, + KSZ8794_CHIP_ID = 0x8794, + KSZ8765_CHIP_ID = 0x8765, + KSZ8830_CHIP_ID = 0x8830, + KSZ9477_CHIP_ID = 0x00947700, + KSZ9896_CHIP_ID = 0x00989600, + KSZ9897_CHIP_ID = 0x00989700, + KSZ9893_CHIP_ID = 0x00989300, + KSZ9563_CHIP_ID = 0x00956300, + KSZ9567_CHIP_ID = 0x00956700, + LAN9370_CHIP_ID = 0x00937000, + LAN9371_CHIP_ID = 0x00937100, + LAN9372_CHIP_ID = 0x00937200, + LAN9373_CHIP_ID = 0x00937300, + LAN9374_CHIP_ID = 0x00937400, +}; struct ksz_platform_data { + /* Must be first such that dsa_register_switch() can access it */ + struct dsa_chip_data cd; u32 chip_id; - u16 enabled_ports; }; #endif diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h index 0dd851ea1c72..7fcb55fe21c9 100644 --- a/include/linux/platform_data/omap-twl4030.h +++ b/include/linux/platform_data/omap-twl4030.h @@ -37,9 +37,6 @@ struct omap_tw4030_pdata { bool has_digimic0; bool has_digimic1; u8 has_linein; - - /* Jack detect GPIO or <= 0 if it is not implemented */ - int jack_detect; }; #endif /* _OMAP_TWL4030_H_ */ diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h index 96c1a14ab365..3c3787c4d96c 100644 --- a/include/linux/platform_data/pca953x.h +++ b/include/linux/platform_data/pca953x.h @@ -11,21 +11,8 @@ struct pca953x_platform_data { /* number of the first GPIO */ unsigned gpio_base; - /* initial polarity inversion setting */ - u32 invert; - /* interrupt base */ int irq_base; - - void *context; /* param to setup/teardown */ - - int (*setup)(struct i2c_client *client, - unsigned gpio, unsigned ngpio, - void *context); - void (*teardown)(struct i2c_client *client, - unsigned gpio, unsigned ngpio, - void *context); - const char *const *names; }; #endif /* _LINUX_PCA953X_H */ diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h deleted file mode 100644 index 22c53825528f..000000000000 --- a/include/linux/platform_data/rtc-ds2404.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * ds2404.h - platform data structure for the DS2404 RTC. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
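Back up in the cros_ec_commands.h hunk, the EC_MKBP_EVENT_CEC_* helpers pack a CEC port number into the top nibble of the 32-bit MKBP event word, leaving bits 27:0 for the event bitmask. They round-trip cleanly in isolation; this sketch carries its own 32-bit GENMASK() since the kernel's lives in linux/bits.h:

#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for the kernel's GENMASK(): bits h..l set. */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* bits[27:0]: event bitmask; bits[31:28]: CEC port, as in the header above */
#define EC_MKBP_EVENT_CEC_PACK(events, port) \
	(((events) & GENMASK(27, 0)) | (((port) & 0xf) << 28))
#define EC_MKBP_EVENT_CEC_GET_EVENTS(event) ((event) & GENMASK(27, 0))
#define EC_MKBP_EVENT_CEC_GET_PORT(event) (((event) >> 28) & 0xf)

int main(void)
{
	/* 0x5 = EC_MKBP_CEC_SEND_OK | EC_MKBP_CEC_HAVE_DATA, on port 3 */
	uint32_t ev = EC_MKBP_EVENT_CEC_PACK(0x5, 3);

	printf("events=%#x port=%u\n",
	       (unsigned)EC_MKBP_EVENT_CEC_GET_EVENTS(ev),
	       (unsigned)EC_MKBP_EVENT_CEC_GET_PORT(ev));
	return 0;
}

The 4-bit port field is also why EC_CEC_MAX_PORTS is 16 and why the cec_command structs above split cmd/port into two nibbles of one byte.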
- * - * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org> - */ - -#ifndef __LINUX_DS2404_H -#define __LINUX_DS2404_H - -struct ds2404_platform_data { - - unsigned int gpio_rst; - unsigned int gpio_clk; - unsigned int gpio_dq; -}; -#endif diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h index d661399b217d..6c19d4fbbe39 100644 --- a/include/linux/platform_data/shmob_drm.h +++ b/include/linux/platform_data/shmob_drm.h @@ -10,7 +10,7 @@ #ifndef __SHMOB_DRM_H__ #define __SHMOB_DRM_H__ -#include <drm/drm_mode.h> +#include <video/videomode.h> enum shmob_drm_clk_source { SHMOB_DRM_CLK_BUS, @@ -18,72 +18,21 @@ enum shmob_drm_clk_source { SHMOB_DRM_CLK_EXTERNAL, }; -enum shmob_drm_interface { - SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */ - SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */ - SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */ - SHMOB_DRM_IFACE_RGB12B, /* 12bpp */ - SHMOB_DRM_IFACE_RGB16, /* 16bpp */ - SHMOB_DRM_IFACE_RGB18, /* 18bpp */ - SHMOB_DRM_IFACE_RGB24, /* 24bpp */ - SHMOB_DRM_IFACE_YUV422, /* 16bpp */ - SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */ - SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */ - SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */ - SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */ - SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */ - SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */ - SHMOB_DRM_IFACE_SYS16A, /* 16bpp */ - SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */ - SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */ - SHMOB_DRM_IFACE_SYS18, /* 18bpp */ - SHMOB_DRM_IFACE_SYS24, /* 24bpp */ -}; - -struct shmob_drm_backlight_data { - const char *name; - int max_brightness; - int (*get_brightness)(void); - int (*set_brightness)(int brightness); -}; - struct shmob_drm_panel_data { unsigned int width_mm; /* Panel width in mm */ unsigned int height_mm; /* Panel height in mm */ - struct drm_mode_modeinfo mode; + struct videomode mode; }; -struct shmob_drm_sys_interface_data { - unsigned int read_latch:6; - unsigned int read_setup:8; - unsigned int read_cycle:8; - unsigned int read_strobe:8; - unsigned int write_setup:8; - unsigned int write_cycle:8; - unsigned int write_strobe:8; - unsigned int cs_setup:3; - unsigned int vsync_active_high:1; - unsigned int vsync_dir_input:1; -}; - -#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */ -#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */ -#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */ -#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */ -#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */ - struct shmob_drm_interface_data { - enum shmob_drm_interface interface; - struct shmob_drm_sys_interface_data sys; + unsigned int bus_fmt; /* MEDIA_BUS_FMT_* */ unsigned int clk_div; - unsigned int flags; }; struct shmob_drm_platform_data { enum shmob_drm_clk_source clk_source; struct shmob_drm_interface_data iface; struct shmob_drm_panel_data panel; - struct shmob_drm_backlight_data backlight; }; #endif /* __SHMOB_DRM_H__ */ diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h index c71a2dd66143..5f412a615532 100644 --- a/include/linux/platform_data/si5351.h +++ b/include/linux/platform_data/si5351.h @@ -105,10 +105,12 @@ struct si5351_clkout_config { * @clk_xtal: xtal input clock * @clk_clkin: clkin input clock * @pll_src: array of pll source clock setting + * @pll_reset: array indicating if plls should be reset after setting the rate * @clkout: array of clkout configuration */ struct 
si5351_platform_data { enum si5351_pll_src pll_src[2]; + bool pll_reset[2]; struct si5351_clkout_config clkout[8]; }; diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 16e99a1c37fc..ab1c7deff118 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -58,6 +58,10 @@ #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ #define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 +/* This can only be used to disable the screen, not re-enable */ +#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031 +/* Writing a brightness re-enables the screen if disabled */ +#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032 #define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018 #define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075 @@ -110,6 +114,9 @@ /* Charging mode - 1=Barrel, 2=USB */ #define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C +/* MCU powersave mode */ +#define ASUS_WMI_DEVID_MCU_POWERSAVE 0x001200E2 + /* epu is connected? 1 == true */ #define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018 /* egpu on/off */ diff --git a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h index 41df326583f9..7f132029316a 100644 --- a/include/linux/platform_data/x86/clk-lpss.h +++ b/include/linux/platform_data/x86/clk-lpss.h @@ -15,6 +15,6 @@ struct lpss_clk_data { struct clk *clk; }; -extern int lpss_atom_clk_init(void); +int lpss_atom_clk_init(void); #endif /* __CLK_LPSS_H */ diff --git a/include/linux/plist.h b/include/linux/plist.h index 0f352c1d3c80..8c1c8adf7fe9 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -75,20 +75,10 @@ #include <linux/container_of.h> #include <linux/list.h> -#include <linux/types.h> +#include <linux/plist_types.h> #include <asm/bug.h> -struct plist_head { - struct list_head node_list; -}; - -struct plist_node { - int prio; - struct list_head prio_list; - struct list_head node_list; -}; - /** * PLIST_HEAD_INIT - static struct plist_head initializer * @head: struct plist_head variable name diff --git a/include/linux/plist_types.h b/include/linux/plist_types.h new file mode 100644 index 000000000000..c37e784330af --- /dev/null +++ b/include/linux/plist_types.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_PLIST_TYPES_H +#define _LINUX_PLIST_TYPES_H + +#include <linux/types.h> + +struct plist_head { + struct list_head node_list; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +#endif /* _LINUX_PLIST_TYPES_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index 1400c37b29c7..a2f3e53a8196 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -374,24 +374,39 @@ const struct dev_pm_ops name = { \ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \ } -#ifdef CONFIG_PM -#define _EXPORT_DEV_PM_OPS(name, license, ns) \ +#define _EXPORT_PM_OPS(name, license, ns) \ const struct dev_pm_ops name; \ __EXPORT_SYMBOL(name, license, ns); \ const struct dev_pm_ops name -#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name) -#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns) -#else -#define _EXPORT_DEV_PM_OPS(name, license, ns) \ + +#define _DISCARD_PM_OPS(name, license, ns) \ static __maybe_unused const struct dev_pm_ops __static_##name + +#ifdef CONFIG_PM +#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns) +#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name) 
+#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns) +#else +#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns) #define EXPORT_PM_FN_GPL(name) #define EXPORT_PM_FN_NS_GPL(name, ns) #endif -#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "") -#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "") -#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns) -#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns) +#ifdef CONFIG_PM_SLEEP +#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns) +#else +#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns) +#endif + +#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "") +#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "") +#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns) +#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns) + +#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "") +#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "") +#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns) +#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns) /* * Use this if you want to use the same suspend and resume callbacks for suspend @@ -404,19 +419,19 @@ const struct dev_pm_ops name = { \ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL) #define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ - EXPORT_DEV_PM_OPS(name) = { \ + EXPORT_DEV_SLEEP_PM_OPS(name) = { \ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } #define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ - EXPORT_GPL_DEV_PM_OPS(name) = { \ + EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } #define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \ - EXPORT_NS_DEV_PM_OPS(name, ns) = { \ + EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } #define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \ - EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \ + EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } @@ -666,6 +681,7 @@ struct dev_pm_info { bool wakeup_path:1; bool syscore:1; bool no_pm_callbacks:1; /* Owned by the PM core */ + bool async_in_progress:1; /* Owned by the PM core */ unsigned int must_resume:1; /* Owned by the PM core */ unsigned int may_skip_resume:1; /* Set by subsystems */ #else @@ -719,6 +735,7 @@ extern void dev_pm_put_subsys_data(struct device *dev); * @activate: Called before executing probe routines for bus types and drivers. * @sync: Called after successful driver probe. * @dismiss: Called after unsuccessful driver probe and after driver removal. + * @set_performance_state: Called to request a new performance state. 
* * Power domains provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions instead of @@ -731,6 +748,7 @@ struct dev_pm_domain { int (*activate)(struct device *dev); void (*sync)(struct device *dev); void (*dismiss)(struct device *dev); + int (*set_performance_state)(struct device *dev, unsigned int state); }; /* diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index ada3a0ab10bf..68669ce18720 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -91,10 +91,10 @@ static inline int devm_pm_clk_create(struct device *dev) #endif #ifdef CONFIG_HAVE_CLK -extern void pm_clk_add_notifier(struct bus_type *bus, +extern void pm_clk_add_notifier(const struct bus_type *bus, struct pm_clk_notifier_block *clknb); #else -static inline void pm_clk_add_notifier(struct bus_type *bus, +static inline void pm_clk_add_notifier(const struct bus_type *bus, struct pm_clk_notifier_block *clknb) { } diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index f776fb93eaa0..b97c5e9820f9 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -61,6 +61,10 @@ * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its * components' next wakeup when determining the * optimal idle state. + * + * GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states, + * but its corresponding OPP tables are not + * described in DT, but are given directly by FW. */ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) @@ -69,6 +73,7 @@ #define GENPD_FLAG_CPU_DOMAIN (1U << 4) #define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5) #define GENPD_FLAG_MIN_RESIDENCY (1U << 6) +#define GENPD_FLAG_OPP_TABLE_FW (1U << 7) enum gpd_status { GENPD_STATE_ON = 0, /* PM domain is on */ @@ -113,7 +118,6 @@ struct genpd_power_state { }; struct genpd_lock_ops; -struct dev_pm_opp; struct opp_table; struct generic_pm_domain { @@ -141,8 +145,6 @@ struct generic_pm_domain { int (*power_on)(struct generic_pm_domain *domain); struct raw_notifier_head power_notifiers; /* Power on/off notifiers */ struct opp_table *opp_table; /* OPP table of the genpd */ - unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd, - struct dev_pm_opp *opp); int (*set_performance_state)(struct generic_pm_domain *genpd, unsigned int state); struct gpd_dev_ops dev_ops; @@ -343,8 +345,6 @@ int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n); -unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, - struct dev_pm_opp *opp); int genpd_dev_pm_attach(struct device *dev); struct device *genpd_dev_pm_attach_by_id(struct device *dev, @@ -390,13 +390,6 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn, return -ENODEV; } -static inline unsigned int -pm_genpd_opp_to_performance_state(struct device *genpd_dev, - struct dev_pm_opp *opp) -{ - return 0; -} - static inline int genpd_dev_pm_attach(struct device *dev) { return 0; @@ -430,6 +423,7 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev, void dev_pm_domain_detach(struct device *dev, bool power_off); int dev_pm_domain_start(struct device *dev); void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); +int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state); #else static inline int 
dev_pm_domain_attach(struct device *dev, bool power_on) { @@ -452,6 +446,11 @@ static inline int dev_pm_domain_start(struct device *dev) } static inline void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd) {} +static inline int dev_pm_domain_set_performance_state(struct device *dev, + unsigned int state) +{ + return 0; +} #endif #endif /* _LINUX_PM_DOMAIN_H */ diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 91f87d7e807c..76dcb7f37bcd 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -45,18 +45,6 @@ struct dev_pm_opp_supply { unsigned long u_watt; }; -/** - * struct dev_pm_opp_icc_bw - Interconnect bandwidth values - * @avg: Average bandwidth corresponding to this OPP (in icc units) - * @peak: Peak bandwidth corresponding to this OPP (in icc units) - * - * This structure stores the bandwidth values for a single interconnect path. - */ -struct dev_pm_opp_icc_bw { - u32 avg; - u32 peak; -}; - typedef int (*config_regulators_t)(struct device *dev, struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp, struct regulator **regulators, unsigned int count); @@ -74,8 +62,10 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table, * @supported_hw_count: Number of elements in the array. * @regulator_names: Array of pointers to the names of the regulator, NULL terminated. * @genpd_names: Null terminated array of pointers containing names of genpd to - * attach. - * @virt_devs: Pointer to return the array of virtual devices. + * attach. Mutually exclusive with required_devs. + * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually + * exclusive with required_devs. + * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs. * * This structure contains platform specific OPP configurations for the device. */ @@ -90,6 +80,22 @@ struct dev_pm_opp_config { const char * const *regulator_names; const char * const *genpd_names; struct device ***virt_devs; + struct device **required_devs; +}; + +#define OPP_LEVEL_UNSET U32_MAX + +/** + * struct dev_pm_opp_data - The data to use to initialize an OPP. + * @level: The performance level for the OPP. Set level to OPP_LEVEL_UNSET if + * level field isn't used. + * @freq: The clock rate in Hz for the OPP. + * @u_volt: The voltage in uV for the OPP. 
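Together with dev_pm_opp_add_dynamic() declared in this hunk, registering an OPP now goes through this struct. A minimal sketch (the frequency and voltage values are made up; dev is the consumer device):

    struct dev_pm_opp_data data = {
            .level = OPP_LEVEL_UNSET,       /* performance level not used */
            .freq = 800000000,              /* 800 MHz */
            .u_volt = 900000,               /* 900 mV */
    };
    int ret = dev_pm_opp_add_dynamic(dev, &data);

    if (ret)
            dev_err(dev, "failed to add OPP: %d\n", ret);

The old two-argument dev_pm_opp_add() survives below as a static inline wrapper that fills this struct.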
+ */ +struct dev_pm_opp_data { + unsigned int level; + unsigned long freq; + unsigned long u_volt; }; #if defined(CONFIG_PM_OPP) @@ -144,6 +150,9 @@ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, unsigned int *level); +struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, + unsigned int *level); + struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index); @@ -152,8 +161,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, void dev_pm_opp_put(struct dev_pm_opp *opp); -int dev_pm_opp_add(struct device *dev, unsigned long freq, - unsigned long u_volt); +int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp); + void dev_pm_opp_remove(struct device *dev, unsigned long freq); void dev_pm_opp_remove_all_dynamic(struct device *dev); @@ -308,6 +317,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, return ERR_PTR(-EOPNOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, + unsigned int *level) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index) { @@ -322,8 +337,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} -static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, - unsigned long u_volt) +static inline int +dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp) { return -EOPNOTSUPP; } @@ -519,6 +534,17 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta /* OPP Configuration helpers */ +static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, + unsigned long u_volt) +{ + struct dev_pm_opp_data data = { + .freq = freq, + .u_volt = u_volt, + }; + + return dev_pm_opp_add_dynamic(dev, &data); +} + /* Regulators helpers */ static inline int dev_pm_opp_set_regulators(struct device *dev, const char * const names[]) diff --git a/include/linux/pnp.h b/include/linux/pnp.h index c2a7cfbca713..ddbe7c3ca4ce 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -291,7 +291,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data) struct pnp_fixup { char id[7]; - void (*quirk_function) (struct pnp_dev * dev); /* fixup function */ + void (*quirk_function) (struct pnp_dev *dev); /* fixup function */ }; /* config parameters */ @@ -419,8 +419,8 @@ struct pnp_protocol { /* protocol specific suspend/resume */ bool (*can_wakeup) (struct pnp_dev *dev); - int (*suspend) (struct pnp_dev * dev, pm_message_t state); - int (*resume) (struct pnp_dev * dev); + int (*suspend) (struct pnp_dev *dev, pm_message_t state); + int (*resume) (struct pnp_dev *dev); /* used by pnp layer only (look but don't touch) */ unsigned char number; /* protocol number */ @@ -435,7 +435,7 @@ struct pnp_protocol { #define protocol_for_each_dev(protocol, dev) \ list_for_each_entry(dev, &(protocol)->devices, protocol_list) -extern struct bus_type pnp_bus_type; +extern const struct bus_type pnp_bus_type; #if defined(CONFIG_PNP) @@ -492,7 +492,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_disable_dev(struct pnp_dev *dev) { return 
-ENODEV; } -static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;} +static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0; } /* protocol helpers */ static inline int pnp_is_active(struct pnp_dev *dev) { return 0; } diff --git a/include/linux/poison.h b/include/linux/poison.h index 851a855d3868..27a7dad17eef 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -83,6 +83,8 @@ /********** net/core/skbuff.c **********/ #define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA)) +/********** net/ **********/ +#define NET_PTR_POISON ((void *)(0x801 + POISON_POINTER_DELTA)) /********** kernel/bpf/ **********/ #define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA)) diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 468328b1e1dd..ef8619f48920 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -14,6 +14,7 @@ #include <linux/rwsem.h> struct posix_clock; +struct posix_clock_context; /** * struct posix_clock_operations - functional interface to the clock @@ -50,18 +51,18 @@ struct posix_clock_operations { /* * Optional character device methods: */ - long (*ioctl) (struct posix_clock *pc, - unsigned int cmd, unsigned long arg); + long (*ioctl)(struct posix_clock_context *pccontext, unsigned int cmd, + unsigned long arg); - int (*open) (struct posix_clock *pc, fmode_t f_mode); + int (*open)(struct posix_clock_context *pccontext, fmode_t f_mode); - __poll_t (*poll) (struct posix_clock *pc, - struct file *file, poll_table *wait); + __poll_t (*poll)(struct posix_clock_context *pccontext, struct file *file, + poll_table *wait); - int (*release) (struct posix_clock *pc); + int (*release)(struct posix_clock_context *pccontext); - ssize_t (*read) (struct posix_clock *pc, - uint flags, char __user *buf, size_t cnt); + ssize_t (*read)(struct posix_clock_context *pccontext, uint flags, + char __user *buf, size_t cnt); }; /** @@ -91,6 +92,24 @@ struct posix_clock { }; /** + * struct posix_clock_context - represents clock file operations context + * + * @clk: Pointer to the clock + * @private_clkdata: Pointer to user data + * + * Drivers should use struct posix_clock_context during specific character + * device file operation methods to access the posix clock. + * + * Drivers can store a private data structure during the open operation + * if they have specific information that is required in other file + * operations. + */ +struct posix_clock_context { + struct posix_clock *clk; + void *private_clkdata; +}; + +/** * posix_clock_register() - register a new clock * @clk: Pointer to the clock. Caller must provide 'ops' field * @dev: Pointer to the initialized device. Caller must provide diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index d607f51404fc..dc7b738de299 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -2,40 +2,16 @@ #ifndef _linux_POSIX_TIMERS_H #define _linux_POSIX_TIMERS_H -#include <linux/spinlock.h> +#include <linux/alarmtimer.h> #include <linux/list.h> #include <linux/mutex.h> -#include <linux/alarmtimer.h> +#include <linux/posix-timers_types.h> +#include <linux/spinlock.h> #include <linux/timerqueue.h> struct kernel_siginfo; struct task_struct; -/* - * Bit fields within a clockid: - * - * The most significant 29 bits hold either a pid or a file descriptor. - * - * Bit 2 indicates whether a cpu clock refers to a thread or a process. 
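How a clock driver might use the new context from the posix-clock hunk above: the open() method can hang per-file-descriptor state off @private_clkdata and release() frees it. A sketch, with all foo_* names illustrative:

    static int foo_clock_open(struct posix_clock_context *pccontext,
                              fmode_t f_mode)
    {
            struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

            if (!ctx)
                    return -ENOMEM;
            pccontext->private_clkdata = ctx;
            return 0;
    }

    static int foo_clock_release(struct posix_clock_context *pccontext)
    {
            kfree(pccontext->private_clkdata);
            pccontext->private_clkdata = NULL;
            return 0;
    }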
- * - * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. - * - * A clockid is invalid if bits 2, 1, and 0 are all set. - */ -#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) -#define CPUCLOCK_PERTHREAD(clock) \ - (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) - -#define CPUCLOCK_PERTHREAD_MASK 4 -#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) -#define CPUCLOCK_CLOCK_MASK 3 -#define CPUCLOCK_PROF 0 -#define CPUCLOCK_VIRT 1 -#define CPUCLOCK_SCHED 2 -#define CPUCLOCK_MAX 3 -#define CLOCKFD CPUCLOCK_MAX -#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) - static inline clockid_t make_process_cpuclock(const unsigned int pid, const clockid_t clock) { @@ -109,44 +85,6 @@ static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp) ctmr->node.expires = exp; } -/** - * posix_cputimer_base - Container per posix CPU clock - * @nextevt: Earliest-expiration cache - * @tqhead: timerqueue head for cpu_timers - */ -struct posix_cputimer_base { - u64 nextevt; - struct timerqueue_head tqhead; -}; - -/** - * posix_cputimers - Container for posix CPU timer related data - * @bases: Base container for posix CPU clocks - * @timers_active: Timers are queued. - * @expiry_active: Timer expiry is active. Used for - * process wide timers to avoid multiple - * task trying to handle expiry concurrently - * - * Used in task_struct and signal_struct - */ -struct posix_cputimers { - struct posix_cputimer_base bases[CPUCLOCK_MAX]; - unsigned int timers_active; - unsigned int expiry_active; -}; - -/** - * posix_cputimers_work - Container for task work based posix CPU timer expiry - * @work: The task work to be scheduled - * @mutex: Mutex held around expiry in context of this task work - * @scheduled: @work has been scheduled already, no further processing - */ -struct posix_cputimers_work { - struct callback_head work; - struct mutex mutex; - unsigned int scheduled; -}; - static inline void posix_cputimers_init(struct posix_cputimers *pct) { memset(pct, 0, sizeof(*pct)); @@ -179,7 +117,6 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct, .bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \ }, #else -struct posix_cputimers { }; struct cpu_timer { }; #define INIT_CPU_TIMERS(s) static inline void posix_cputimers_init(struct posix_cputimers *pct) { } diff --git a/include/linux/posix-timers_types.h b/include/linux/posix-timers_types.h new file mode 100644 index 000000000000..a4712c1008c9 --- /dev/null +++ b/include/linux/posix-timers_types.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _linux_POSIX_TIMERS_TYPES_H +#define _linux_POSIX_TIMERS_TYPES_H + +#include <linux/mutex_types.h> +#include <linux/timerqueue_types.h> +#include <linux/types.h> + +/* + * Bit fields within a clockid: + * + * The most significant 29 bits hold either a pid or a file descriptor. + * + * Bit 2 indicates whether a cpu clock refers to a thread or a process. + * + * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. + * + * A clockid is invalid if bits 2, 1, and 0 are all set. 
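Read against the bit layout described above, decoding a CPU clockid with the macros that follow looks like this (sketch; clk is some clockid_t value):

    pid_t pid = CPUCLOCK_PID(clk);             /* upper 29 bits, bitwise-inverted */
    bool per_thread = CPUCLOCK_PERTHREAD(clk); /* bit 2: thread vs. process */
    int type = CPUCLOCK_WHICH(clk);            /* CPUCLOCK_PROF/VIRT/SCHED, or CLOCKFD */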
+ */ +#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) +#define CPUCLOCK_PERTHREAD(clock) \ + (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) + +#define CPUCLOCK_PERTHREAD_MASK 4 +#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) +#define CPUCLOCK_CLOCK_MASK 3 +#define CPUCLOCK_PROF 0 +#define CPUCLOCK_VIRT 1 +#define CPUCLOCK_SCHED 2 +#define CPUCLOCK_MAX 3 +#define CLOCKFD CPUCLOCK_MAX +#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) + +#ifdef CONFIG_POSIX_TIMERS + +/** + * posix_cputimer_base - Container per posix CPU clock + * @nextevt: Earliest-expiration cache + * @tqhead: timerqueue head for cpu_timers + */ +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +/** + * posix_cputimers - Container for posix CPU timer related data + * @bases: Base container for posix CPU clocks + * @timers_active: Timers are queued. + * @expiry_active: Timer expiry is active. Used for + * process wide timers to avoid multiple + * tasks trying to handle expiry concurrently + * + * Used in task_struct and signal_struct + */ +struct posix_cputimers { + struct posix_cputimer_base bases[CPUCLOCK_MAX]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +/** + * posix_cputimers_work - Container for task work based posix CPU timer expiry + * @work: The task work to be scheduled + * @mutex: Mutex held around expiry in context of this task work + * @scheduled: @work has been scheduled already, no further processing + */ +struct posix_cputimers_work { + struct callback_head work; + struct mutex mutex; + unsigned int scheduled; +}; + +#else /* CONFIG_POSIX_TIMERS */ + +struct posix_cputimers { }; + +#endif /* CONFIG_POSIX_TIMERS */ + +#endif /* _linux_POSIX_TIMERS_TYPES_H */ diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 7c8d65414a70..7d8025fb74b7 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -83,5 +83,6 @@ struct bq27xxx_device_info { void bq27xxx_battery_update(struct bq27xxx_device_info *di); int bq27xxx_battery_setup(struct bq27xxx_device_info *di); void bq27xxx_battery_teardown(struct bq27xxx_device_info *di); +extern const struct dev_pm_ops bq27xxx_battery_battery_pm_ops; #endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index a427f13c757f..c0992a77feea 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -767,7 +767,6 @@ struct power_supply_battery_info { int bti_resistance_tolerance; }; -extern struct atomic_notifier_head power_supply_notifier; extern int power_supply_reg_notifier(struct notifier_block *nb); extern void power_supply_unreg_notifier(struct notifier_block *nb); #if IS_ENABLED(CONFIG_POWER_SUPPLY) diff --git a/include/linux/prandom.h b/include/linux/prandom.h index f2ed5b72b3d6..f7f1e5251c67 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -10,7 +10,6 @@ #include <linux/types.h> #include <linux/once.h> -#include <linux/percpu.h> #include <linux/random.h> struct rnd_state { diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 1424670df161..7233e9cf1bab 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -9,7 +9,7 @@ #include <linux/linkage.h> #include <linux/cleanup.h> -#include <linux/list.h> +#include <linux/types.h> /* * We put the hardirq and softirq counter into the preemption @@ -99,14 +99,21 @@ static __always_inline unsigned char interrupt_context_level(void) return level; } +/*
+ * These macro definitions avoid redundant invocations of preempt_count() + * because such invocations would result in redundant loads given that + * preempt_count() is commonly implemented with READ_ONCE(). + */ + #define nmi_count() (preempt_count() & NMI_MASK) #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #ifdef CONFIG_PREEMPT_RT # define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK) +# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count()) #else # define softirq_count() (preempt_count() & SOFTIRQ_MASK) +# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK)) #endif -#define irq_count() (nmi_count() | hardirq_count() | softirq_count()) /* * Macros to retrieve the current execution context: @@ -119,7 +126,11 @@ static __always_inline unsigned char interrupt_context_level(void) #define in_nmi() (nmi_count()) #define in_hardirq() (hardirq_count()) #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) -#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq())) +#ifdef CONFIG_PREEMPT_RT +# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq())) +#else +# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) +#endif /* * The following macros are deprecated and should not be used in new code: @@ -349,7 +360,9 @@ void preempt_notifier_unregister(struct preempt_notifier *notifier); static inline void preempt_notifier_init(struct preempt_notifier *notifier, struct preempt_ops *ops) { - INIT_HLIST_NODE(¬ifier->link); + /* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */ + notifier->link.next = NULL; + notifier->link.pprev = NULL; notifier->ops = ops; } diff --git a/include/linux/property.h b/include/linux/property.h index 8c3c6685a2ae..e6516d0b7d52 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -10,6 +10,7 @@ #ifndef _LINUX_PROPERTY_H_ #define _LINUX_PROPERTY_H_ +#include <linux/args.h> #include <linux/bits.h> #include <linux/fwnode.h> #include <linux/stddef.h> @@ -79,6 +80,16 @@ int fwnode_property_match_string(const struct fwnode_handle *fwnode, bool fwnode_device_is_available(const struct fwnode_handle *fwnode); +static inline bool fwnode_device_is_big_endian(const struct fwnode_handle *fwnode) +{ + if (fwnode_property_present(fwnode, "big-endian")) + return true; + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && + fwnode_property_present(fwnode, "native-endian")) + return true; + return false; +} + static inline bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char *compat) { @@ -86,6 +97,22 @@ bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char } /** + * device_is_big_endian - check if a device has BE registers + * @dev: Pointer to the struct device + * + * Returns: true if the device has a "big-endian" property, or if the kernel + * was compiled for BE *and* the device has a "native-endian" property. + * Returns false otherwise. + * + * Callers would nominally use ioread32be/iowrite32be if + * device_is_big_endian() == true, or readl/writel otherwise. 
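A short consumer sketch of the helper documented above (base and REG_STATUS are illustrative, not from this patch):

    u32 val;

    if (device_is_big_endian(dev))
            val = ioread32be(base + REG_STATUS);
    else
            val = readl(base + REG_STATUS);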
+ */ +static inline bool device_is_big_endian(const struct device *dev) +{ + return fwnode_device_is_big_endian(dev_fwnode(dev)); +} + +/** * device_is_compatible - match 'compatible' property of the device with a given string * @dev: Pointer to the struct device * @compat: The string to match 'compatible' property with @@ -97,6 +124,18 @@ static inline bool device_is_compatible(const struct device *dev, const char *co return fwnode_device_is_compatible(dev_fwnode(dev), compat); } +int fwnode_property_match_property_string(const struct fwnode_handle *fwnode, + const char *propname, + const char * const *array, size_t n); + +static inline +int device_property_match_property_string(const struct device *dev, + const char *propname, + const char * const *array, size_t n) +{ + return fwnode_property_match_property_string(dev_fwnode(dev), propname, array, n); +} + int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, const char *prop, const char *nargs_prop, unsigned int nargs, unsigned int index, @@ -108,6 +147,7 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode, const char *fwnode_get_name(const struct fwnode_handle *fwnode); const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode); +bool fwnode_name_eq(const struct fwnode_handle *fwnode, const char *name); struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode); @@ -288,7 +328,7 @@ struct software_node_ref_args { #define SOFTWARE_NODE_REFERENCE(_ref_, ...) \ (const struct software_node_ref_args) { \ .node = _ref_, \ - .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \ + .nargs = COUNT_ARGS(__VA_ARGS__), \ .args = { __VA_ARGS__ }, \ } @@ -488,6 +528,13 @@ struct software_node { const struct property_entry *properties; }; +#define SOFTWARE_NODE(_name_, _properties_, _parent_) \ + (struct software_node) { \ + .name = _name_, \ + .properties = _properties_, \ + .parent = _parent_, \ + } + bool is_software_node(const struct fwnode_handle *fwnode); const struct software_node * to_software_node(const struct fwnode_handle *fwnode); diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h index eceda1d1407a..730f77381d55 100644 --- a/include/linux/pseudo_fs.h +++ b/include/linux/pseudo_fs.h @@ -5,7 +5,7 @@ struct pseudo_fs_context { const struct super_operations *ops; - const struct xattr_handler **xattr; + const struct xattr_handler * const *xattr; const struct dentry_operations *dops; unsigned long magic; }; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 04ae1d9073a7..fcc2c4496f73 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -41,8 +41,8 @@ struct pwm_args { }; enum { - PWMF_REQUESTED = 1 << 0, - PWMF_EXPORTED = 1 << 1, + PWMF_REQUESTED = 0, + PWMF_EXPORTED = 1, }; /* @@ -69,9 +69,7 @@ struct pwm_state { * @label: name of the PWM device * @flags: flags associated with the PWM device * @hwpwm: per-chip relative index of the PWM device - * @pwm: global index of the PWM device * @chip: PWM chip providing this PWM device - * @chip_data: chip-private data associated with the PWM device * @args: PWM arguments * @state: last applied state * @last: last implemented state (for PWM_DEBUG) @@ -80,9 +78,7 @@ struct pwm_device { const char *label; unsigned long flags; unsigned int hwpwm; - unsigned int pwm; struct pwm_chip *chip; - void *chip_data; struct pwm_args args; struct pwm_state state; @@ -95,8 +91,8 @@ struct pwm_device { * @state: state to fill 
with the current PWM state * * The returned PWM state represents the state that was applied by a previous call to - * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to - * hardware. If pwm_apply_state() was never called, this returns either the current hardware + * pwm_apply_might_sleep(). Drivers may have to slightly tweak that state before programming it to + * hardware. If pwm_apply_might_sleep() was never called, this returns either the current hardware * state (if supported) or the default settings. */ static inline void pwm_get_state(const struct pwm_device *pwm, @@ -114,12 +110,6 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm) return state.enabled; } -static inline void pwm_set_period(struct pwm_device *pwm, u64 period) -{ - if (pwm) - pwm->state.period = period; -} - static inline u64 pwm_get_period(const struct pwm_device *pwm) { struct pwm_state state; @@ -129,12 +119,6 @@ static inline u64 pwm_get_period(const struct pwm_device *pwm) return state.period; } -static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) -{ - if (pwm) - pwm->state.duty_cycle = duty; -} - static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm) { struct pwm_state state; @@ -160,20 +144,20 @@ static inline void pwm_get_args(const struct pwm_device *pwm, } /** - * pwm_init_state() - prepare a new state to be applied with pwm_apply_state() + * pwm_init_state() - prepare a new state to be applied with pwm_apply_might_sleep() * @pwm: PWM device * @state: state to fill with the prepared PWM state * * This function prepares a state that can later be tweaked and applied - * to the PWM device with pwm_apply_state(). This is a convenient function + * to the PWM device with pwm_apply_might_sleep(). This is a convenient function * that first retrieves the current PWM state and then replaces the period * and polarity fields with the reference values defined in pwm->args. * Once the function returns, you can adjust the ->enabled and ->duty_cycle - * fields according to your needs before calling pwm_apply_state(). + * fields according to your needs before calling pwm_apply_might_sleep(). * * ->duty_cycle is initially set to zero to avoid cases where the current * ->duty_cycle value exceeds the pwm_args->period one, which would trigger - * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle + * an error if the user calls pwm_apply_might_sleep() without adjusting ->duty_cycle * first. */ static inline void pwm_init_state(const struct pwm_device *pwm, @@ -229,7 +213,7 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale) * * pwm_init_state(pwm, &state); * pwm_set_relative_duty_cycle(&state, 50, 100); - * pwm_apply_state(pwm, &state); + * pwm_apply_might_sleep(pwm, &state); * * This function returns -EINVAL if @duty_cycle and/or @scale are * inconsistent (@scale == 0 or @duty_cycle > @scale). @@ -267,7 +251,6 @@ struct pwm_capture { * @get_state: get the current PWM state. This function is only * called once per PWM device when the PWM chip is * registered.
- * @owner: helps prevent removal of modules exporting active PWMs */ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); @@ -278,38 +261,40 @@ struct pwm_ops { const struct pwm_state *state); int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); - struct module *owner; }; /** * struct pwm_chip - abstract a PWM controller * @dev: device providing the PWMs * @ops: callbacks for this PWM controller - * @base: number of first PWM controlled by this chip + * @owner: module providing this chip + * @id: unique number of this PWM chip * @npwm: number of PWMs controlled by this chip * @of_xlate: request a PWM device given a device tree PWM specifier * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier - * @list: list node for internal use + * @atomic: can the driver's ->apply() be called in atomic context * @pwms: array of PWM devices allocated by the framework */ struct pwm_chip { struct device *dev; const struct pwm_ops *ops; - int base; + struct module *owner; + unsigned int id; unsigned int npwm; - struct pwm_device * (*of_xlate)(struct pwm_chip *pc, + struct pwm_device * (*of_xlate)(struct pwm_chip *chip, const struct of_phandle_args *args); unsigned int of_pwm_n_cells; + bool atomic; /* only used internally by the PWM framework */ - struct list_head list; struct pwm_device *pwms; }; #if IS_ENABLED(CONFIG_PWM) /* PWM user APIs */ -int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state); +int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state); +int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state); int pwm_adjust_config(struct pwm_device *pwm); /** @@ -337,7 +322,7 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns, state.duty_cycle = duty_ns; state.period = period_ns; - return pwm_apply_state(pwm, &state); + return pwm_apply_might_sleep(pwm, &state); } /** @@ -358,7 +343,7 @@ static inline int pwm_enable(struct pwm_device *pwm) return 0; state.enabled = true; - return pwm_apply_state(pwm, &state); + return pwm_apply_might_sleep(pwm, &state); } /** @@ -377,27 +362,38 @@ static inline void pwm_disable(struct pwm_device *pwm) return; state.enabled = false; - pwm_apply_state(pwm, &state); + pwm_apply_might_sleep(pwm, &state); +} + +/** + * pwm_might_sleep() - is pwm_apply_atomic() supported? + * @pwm: PWM device + * + * Returns: false if pwm_apply_atomic() can be called from atomic context. 
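A consumer-side sketch of the new sleeping/atomic split (the fallback policy shown is illustrative):

    /* e.g. from an hrtimer callback, where sleeping is not allowed */
    if (!pwm_might_sleep(pwm))
            ret = pwm_apply_atomic(pwm, &state);
    else
            ret = -EBUSY;   /* punt to process context instead */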
+ */ +static inline bool pwm_might_sleep(struct pwm_device *pwm) +{ + return !pwm->chip->atomic; } /* PWM provider APIs */ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, unsigned long timeout); -int pwm_set_chip_data(struct pwm_device *pwm, void *data); -void *pwm_get_chip_data(struct pwm_device *pwm); -int pwmchip_add(struct pwm_chip *chip); +int __pwmchip_add(struct pwm_chip *chip, struct module *owner); +#define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE) void pwmchip_remove(struct pwm_chip *chip); -int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip); +int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner); +#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE) struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, const char *label); -struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc, +struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args); -struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc, +struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args); struct pwm_device *pwm_get(struct device *dev, const char *con_id); @@ -408,16 +404,27 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, const char *con_id); #else -static inline int pwm_apply_state(struct pwm_device *pwm, - const struct pwm_state *state) +static inline bool pwm_might_sleep(struct pwm_device *pwm) +{ + return true; +} + +static inline int pwm_apply_might_sleep(struct pwm_device *pwm, + const struct pwm_state *state) { might_sleep(); - return -ENOTSUPP; + return -EOPNOTSUPP; +} + +static inline int pwm_apply_atomic(struct pwm_device *pwm, + const struct pwm_state *state) +{ + return -EOPNOTSUPP; } static inline int pwm_adjust_config(struct pwm_device *pwm) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int pwm_config(struct pwm_device *pwm, int duty_ns, @@ -445,16 +452,6 @@ static inline int pwm_capture(struct pwm_device *pwm, return -EINVAL; } -static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data) -{ - return -EINVAL; -} - -static inline void *pwm_get_chip_data(struct pwm_device *pwm) -{ - return NULL; -} - static inline int pwmchip_add(struct pwm_chip *chip) { return -EINVAL; @@ -536,7 +533,14 @@ static inline void pwm_apply_args(struct pwm_device *pwm) state.period = pwm->args.period; state.usage_power = false; - pwm_apply_state(pwm, &state); + pwm_apply_might_sleep(pwm, &state); +} + +/* only for backwards-compatibility, new code should not use this */ +static inline int pwm_apply_state(struct pwm_device *pwm, + const struct pwm_state *state) +{ + return pwm_apply_might_sleep(pwm, state); } struct pwm_lookup { diff --git a/include/linux/quota.h b/include/linux/quota.h index fd692b4a41d5..07071e64abf3 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type) #define DQ_FAKE_B 3 /* no limits only usage */ #define DQ_READ_B 4 /* dquot was read into memory */ #define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */ -#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\ +#define DQ_RELEASING_B 6 /* dquot is in releasing_dquots list waiting + * to be cleaned up */ +#define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\ * for the mask of entries set via SETQUOTA\ * quotactl. 
They are set under dq_data_lock\ * and the quota format handling dquot can\ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 11a4becff3a9..06cc8888199e 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -57,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot) { if (test_bit(DQ_MOD_B, &dquot->dq_flags)) return true; - if (atomic_read(&dquot->dq_count) > 1) + if (atomic_read(&dquot->dq_count) > 0) return true; return false; } @@ -74,7 +74,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags); int dquot_alloc_inode(struct inode *inode); -int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); +void dquot_claim_space_nodirty(struct inode *inode, qsize_t number); void dquot_free_inode(struct inode *inode); void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); @@ -257,10 +257,9 @@ static inline void __dquot_free_space(struct inode *inode, qsize_t number, inode_sub_bytes(inode, number); } -static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) +static inline void dquot_claim_space_nodirty(struct inode *inode, qsize_t number) { inode_add_bytes(inode, number); - return 0; } static inline int dquot_reclaim_space_nodirty(struct inode *inode, @@ -358,14 +357,10 @@ static inline int dquot_reserve_block(struct inode *inode, qsize_t nr) DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE); } -static inline int dquot_claim_block(struct inode *inode, qsize_t nr) +static inline void dquot_claim_block(struct inode *inode, qsize_t nr) { - int ret; - - ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits); - if (!ret) - mark_inode_dirty_sync(inode); - return ret; + dquot_claim_space_nodirty(inode, nr << inode->i_blkbits); + mark_inode_dirty_sync(inode); } static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr) diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index f29aaaf2eb21..98030accf641 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -84,8 +84,6 @@ extern const struct raid6_calls raid6_intx1; extern const struct raid6_calls raid6_intx2; extern const struct raid6_calls raid6_intx4; extern const struct raid6_calls raid6_intx8; -extern const struct raid6_calls raid6_intx16; -extern const struct raid6_calls raid6_intx32; extern const struct raid6_calls raid6_mmxx1; extern const struct raid6_calls raid6_mmxx2; extern const struct raid6_calls raid6_sse1x1; @@ -108,6 +106,8 @@ extern const struct raid6_calls raid6_vpermxor1; extern const struct raid6_calls raid6_vpermxor2; extern const struct raid6_calls raid6_vpermxor4; extern const struct raid6_calls raid6_vpermxor8; +extern const struct raid6_calls raid6_lsx; +extern const struct raid6_calls raid6_lasx; struct raid6_recov_calls { void (*data2)(int, size_t, int, int, void **); @@ -123,6 +123,8 @@ extern const struct raid6_recov_calls raid6_recov_avx2; extern const struct raid6_recov_calls raid6_recov_avx512; extern const struct raid6_recov_calls raid6_recov_s390xc; extern const struct raid6_recov_calls raid6_recov_neon; +extern const struct raid6_recov_calls raid6_recov_lsx; +extern const struct raid6_recov_calls raid6_recov_lasx; extern const struct raid6_calls raid6_neonx1; extern const struct raid6_calls raid6_neonx2; diff --git a/include/linux/rcu_notifier.h b/include/linux/rcu_notifier.h new file mode 100644 index 000000000000..5640f024773b --- /dev/null +++ b/include/linux/rcu_notifier.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Read-Copy Update notifiers, 
initially RCU CPU stall notifier. + * Separate from rcupdate.h to avoid #include loops. + * + * Copyright (C) 2023 Paul E. McKenney. + */ + +#ifndef __LINUX_RCU_NOTIFIER_H +#define __LINUX_RCU_NOTIFIER_H + +// Actions for RCU CPU stall notifier calls. +#define RCU_STALL_NOTIFY_NORM 1 +#define RCU_STALL_NOTIFY_EXP 2 + +#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) + +#include <linux/notifier.h> +#include <linux/types.h> + +int rcu_stall_chain_notifier_register(struct notifier_block *n); +int rcu_stall_chain_notifier_unregister(struct notifier_block *n); + +#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) + +// No RCU CPU stall warnings in Tiny RCU. +static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; } +static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; } + +#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) + +#endif /* __LINUX_RCU_NOTIFIER_H */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index d29740be4833..3dc1e58865f7 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -355,7 +355,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, }) /** - * list_next_or_null_rcu - get the first element from a list + * list_next_or_null_rcu - get the next element from a list * @head: the head for the list. * @ptr: the list head to take the next element from. * @type: the type of the struct this is embedded in. diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 5e5f920ade90..0746b1b0b663 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -34,9 +34,6 @@ #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) -#define ulong2long(a) (*(long *)(&(a))) -#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b))) -#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b))) /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); @@ -122,8 +119,6 @@ static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) void rcu_init(void); extern int rcu_scheduler_active; void rcu_sched_clock_irq(int user); -void rcu_report_dead(unsigned int cpu); -void rcutree_migrate_callbacks(int cpu); #ifdef CONFIG_TASKS_RCU_GENERIC void rcu_init_tasks_generic(void); @@ -303,6 +298,11 @@ static inline void rcu_lock_acquire(struct lockdep_map *map) lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); } +static inline void rcu_try_lock_acquire(struct lockdep_map *map) +{ + lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_); +} + static inline void rcu_lock_release(struct lockdep_map *map) { lock_release(map, _THIS_IP_); @@ -317,6 +317,7 @@ int rcu_read_lock_any_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ # define rcu_lock_acquire(a) do { } while (0) +# define rcu_try_lock_acquire(a) do { } while (0) # define rcu_lock_release(a) do { } while (0) static inline int rcu_read_lock_held(void) diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index 5e0f74f2f8ca..d07f0848802e 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -8,6 +8,7 @@ #include <linux/rcupdate.h> #include <linux/completion.h> +#include <linux/sched.h> /* * Structure allowing asynchronous waiting on RCU. @@ -55,4 +56,13 @@ do { \ #define synchronize_rcu_mult(...) 
\ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) +static inline void cond_resched_rcu(void) +{ +#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) + rcu_read_unlock(); + cond_resched(); + rcu_read_lock(); +#endif +} + #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 7b949292908a..d9ac7b136aea 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -171,6 +171,6 @@ static inline void rcu_all_qs(void) { barrier(); } #define rcutree_offline_cpu NULL #define rcutree_dead_cpu NULL #define rcutree_dying_cpu NULL -static inline void rcu_cpu_starting(unsigned int cpu) { } +static inline void rcutree_report_cpu_starting(unsigned int cpu) { } #endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 126f6b418f6a..254244202ea9 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -37,7 +37,6 @@ void synchronize_rcu_expedited(void); void kvfree_call_rcu(struct rcu_head *head, void *ptr); void rcu_barrier(void); -bool rcu_eqs_special_set(int cpu); void rcu_momentary_dyntick_idle(void); void kfree_rcu_scheduler_running(void); bool rcu_gp_might_be_stalled(void); @@ -111,9 +110,21 @@ void rcu_all_qs(void); /* RCUtree hotplug events */ int rcutree_prepare_cpu(unsigned int cpu); int rcutree_online_cpu(unsigned int cpu); -int rcutree_offline_cpu(unsigned int cpu); +void rcutree_report_cpu_starting(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU int rcutree_dead_cpu(unsigned int cpu); int rcutree_dying_cpu(unsigned int cpu); -void rcu_cpu_starting(unsigned int cpu); +int rcutree_offline_cpu(unsigned int cpu); +#else +#define rcutree_dead_cpu NULL +#define rcutree_dying_cpu NULL +#define rcutree_offline_cpu NULL +#endif + +void rcutree_migrate_callbacks(int cpu); + +/* Called from hotplug and also arm64 early secondary boot failure */ +void rcutree_report_cpu_dead(void); #endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 2b6bb593be5b..abcdde4df697 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -129,11 +129,14 @@ enum sys_off_mode { * @cb_data: User's callback data. * @cmd: Command string. Currently used only by the sys-off restart mode, * NULL otherwise. + * @dev: Device of the sys-off handler. Only if known (devm_register_*), + * NULL otherwise. */ struct sys_off_data { int mode; void *cb_data; const char *cmd; + struct device *dev; }; struct sys_off_handler * @@ -174,7 +177,17 @@ void ctrl_alt_del(void); extern void orderly_poweroff(bool force); extern void orderly_reboot(void); -void hw_protection_shutdown(const char *reason, int ms_until_forced); +void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown); + +static inline void hw_protection_reboot(const char *reason, int ms_until_forced) +{ + __hw_protection_shutdown(reason, ms_until_forced, false); +} + +static inline void hw_protection_shutdown(const char *reason, int ms_until_forced) +{ + __hw_protection_shutdown(reason, ms_until_forced, true); +} /* * Emergency restart, callable from an interrupt handler. 
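Both wrappers above funnel into __hw_protection_shutdown(); the final bool merely selects shutdown versus reboot. A sketch of the intended use from, say, a thermal or regulator driver (the thresholds and the 5000 ms grace period are made-up values):

    if (temp_mdegc > shutdown_limit_mdegc)
            hw_protection_shutdown("critical over-temperature", 5000);
    else if (temp_mdegc > reboot_limit_mdegc)
            hw_protection_reboot("over-temperature", 5000);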
diff --git a/include/linux/refcount.h b/include/linux/refcount.h index a62fcca97486..85c6df0d1bef 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -96,22 +96,11 @@ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/limits.h> +#include <linux/refcount_types.h> #include <linux/spinlock_types.h> struct mutex; -/** - * typedef refcount_t - variant of atomic_t specialized for reference counts - * @refs: atomic_t counter field - * - * The counter saturates at REFCOUNT_SATURATED and will not move once - * there. This avoids wrapping the counter and causing 'spurious' - * use-after-free bugs. - */ -typedef struct refcount_struct { - atomic_t refs; -} refcount_t; - #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } #define REFCOUNT_MAX INT_MAX #define REFCOUNT_SATURATED (INT_MIN / 2) diff --git a/include/linux/refcount_types.h b/include/linux/refcount_types.h new file mode 100644 index 000000000000..162004f06edf --- /dev/null +++ b/include/linux/refcount_types.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_REFCOUNT_TYPES_H +#define _LINUX_REFCOUNT_TYPES_H + +#include <linux/types.h> + +/** + * typedef refcount_t - variant of atomic_t specialized for reference counts + * @refs: atomic_t counter field + * + * The counter saturates at REFCOUNT_SATURATED and will not move once + * there. This avoids wrapping the counter and causing 'spurious' + * use-after-free bugs. + */ +typedef struct refcount_struct { + atomic_t refs; +} refcount_t; + +#endif /* _LINUX_REFCOUNT_TYPES_H */ diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 39b666b40ea6..4660582a3302 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -33,6 +33,7 @@ #include <linux/err.h> #include <linux/suspend.h> +#include <regulator/regulator.h> struct device; struct notifier_block; @@ -85,52 +86,6 @@ struct regulator_dev; #define REGULATOR_MODE_STANDBY 0x8 /* - * Regulator notifier events. - * - * UNDER_VOLTAGE Regulator output is under voltage. - * OVER_CURRENT Regulator output current is too high. - * REGULATION_OUT Regulator output is out of regulation. - * FAIL Regulator output has failed. - * OVER_TEMP Regulator over temp. - * FORCE_DISABLE Regulator forcibly shut down by software. - * VOLTAGE_CHANGE Regulator voltage changed. - * Data passed is old voltage cast to (void *). - * DISABLE Regulator was disabled. - * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed. - * Data passed is "struct pre_voltage_change_data" - * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. - * Data passed is old voltage cast to (void *). - * PRE_DISABLE Regulator is about to be disabled - * ABORT_DISABLE Regulator disable failed for some reason - * - * NOTE: These events can be OR'ed together when passed into handler. 
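Since the REGULATOR_EVENT_* bits (now provided by the uapi header included at the top of the consumer.h hunk below) can be OR'ed together, notifier callbacks should test individual bits rather than compare for equality. A consumer sketch, with all foo_* names illustrative:

    static int foo_reg_notify(struct notifier_block *nb,
                              unsigned long event, void *data)
    {
            if (event & REGULATOR_EVENT_UNDER_VOLTAGE_WARN)
                    foo_reduce_load();      /* recover before things escalate */
            if (event & REGULATOR_EVENT_FORCE_DISABLE)
                    foo_handle_cutoff();
            return NOTIFY_OK;
    }

    /* hooked up with regulator_register_notifier(reg, &foo_nb) */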
- */ - -#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01 -#define REGULATOR_EVENT_OVER_CURRENT 0x02 -#define REGULATOR_EVENT_REGULATION_OUT 0x04 -#define REGULATOR_EVENT_FAIL 0x08 -#define REGULATOR_EVENT_OVER_TEMP 0x10 -#define REGULATOR_EVENT_FORCE_DISABLE 0x20 -#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 -#define REGULATOR_EVENT_DISABLE 0x80 -#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 -#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 -#define REGULATOR_EVENT_PRE_DISABLE 0x400 -#define REGULATOR_EVENT_ABORT_DISABLE 0x800 -#define REGULATOR_EVENT_ENABLE 0x1000 -/* - * Following notifications should be emitted only if detected condition - * is such that the HW is likely to still be working but consumers should - * take a recovery action to prevent problems escalating into errors. - */ -#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000 -#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000 -#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000 -#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000 -#define REGULATOR_EVENT_WARN_MASK 0x1E000 - -/* * Regulator errors that can be queried using regulator_get_error_flags * * UNDER_VOLTAGE Regulator output is under voltage. diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 4b7eceb3828b..22a07c0900a4 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -51,12 +51,7 @@ enum regulator_detection_severity { /* Initialize struct linear_range for regulators */ #define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \ -{ \ - .min = _min_uV, \ - .min_sel = _min_sel, \ - .max_sel = _max_sel, \ - .step = _step_uV, \ -} + LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) /** * struct regulator_ops - regulator operations. diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 621b7f4a3639..0cd76d264727 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -49,6 +49,13 @@ struct regulator; #define DISABLE_IN_SUSPEND 1 #define ENABLE_IN_SUSPEND 2 +/* + * Default time window (in milliseconds) following a critical under-voltage + * event during which less critical actions can be safely carried out by the + * system. + */ +#define REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS 10 + /* Regulator active discharge flags */ enum regulator_active_discharge { REGULATOR_ACTIVE_DISCHARGE_DEFAULT, @@ -127,6 +134,8 @@ struct notification_limit { * @ramp_disable: Disable ramp delay when initialising or when setting voltage. * @soft_start: Enable soft start so that voltage ramps slowly. * @pull_down: Enable pull down when regulator is disabled. + * @system_critical: Set if the regulator is critical to system stability or + * functionality. * @over_current_protection: Auto disable on over current event. * * @over_current_detection: Configure over current limits. @@ -153,6 +162,13 @@ struct notification_limit { * regulator_active_discharge values are used for * initialisation. * @enable_time: Turn-on time of the rails (unit: microseconds) + * @uv_less_critical_window_ms: Specifies the time window (in milliseconds) + * following a critical under-voltage (UV) event + * during which less critical actions can be + * safely carried out by the system (for example + * logging). After this time window more critical + * actions should be done (for example prevent + * HW damage).
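A machine-constraints sketch tying the two new fields together (the regulator name is chosen arbitrarily):

    static const struct regulation_constraints foo_vdd_constraints = {
            .name = "vdd-core",
            .always_on = 1,
            .system_critical = 1,   /* system cannot survive losing this rail */
            .uv_less_critical_window_ms =
                    REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS,
    };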
*/ struct regulation_constraints { @@ -204,6 +220,7 @@ struct regulation_constraints { unsigned int settling_time_up; unsigned int settling_time_down; unsigned int enable_time; + unsigned int uv_less_critical_window_ms; unsigned int active_discharge; @@ -214,6 +231,7 @@ struct regulation_constraints { unsigned ramp_disable:1; /* disable ramp delay */ unsigned soft_start:1; /* ramp voltage slowly */ unsigned pull_down:1; /* pull down resistor when regulator off */ + unsigned system_critical:1; /* critical to system stability */ unsigned over_current_protection:1; /* auto disable on over current */ unsigned over_current_detection:1; /* notify on over current */ unsigned over_voltage_detection:1; /* notify on over voltage */ diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h index c71a6a9fce7a..562386f9b80e 100644 --- a/include/linux/regulator/mt6358-regulator.h +++ b/include/linux/regulator/mt6358-regulator.h @@ -86,6 +86,9 @@ enum { MT6366_ID_VMC, MT6366_ID_VAUD28, MT6366_ID_VSIM2, + MT6366_ID_VM18, + MT6366_ID_VMDDR, + MT6366_ID_VSRAM_CORE, MT6366_ID_RG_MAX, }; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index fe8978eb69f1..b4795698d8c2 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -690,6 +690,10 @@ int rproc_detach(struct rproc *rproc); int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); + +/* from remoteproc_coredump.c */ +void rproc_coredump_cleanup(struct rproc *rproc); +void rproc_coredump(struct rproc *rproc); void rproc_coredump_using_sections(struct rproc *rproc); int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); int rproc_coredump_add_custom_segment(struct rproc *rproc, diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 8334eeacfec5..66942d7fba7f 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -94,7 +94,7 @@ struct rdt_domain { * zero CBM. * @shareable_bits: Bitmask of shareable resource with other * executing entities - * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid. * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache * level has CPU scope. 
*/ @@ -102,7 +102,7 @@ struct resctrl_cache { unsigned int cbm_len; unsigned int min_cbm_bits; unsigned int shareable_bits; - bool arch_has_sparse_bitmaps; + bool arch_has_sparse_bitmasks; bool arch_has_per_cpu_cfg; }; diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h index 980a65594412..13f17676c5f4 100644 --- a/include/linux/restart_block.h +++ b/include/linux/restart_block.h @@ -7,8 +7,8 @@ #include <linux/compiler.h> #include <linux/types.h> -#include <linux/time64.h> +struct __kernel_timespec; struct timespec; struct old_timespec32; struct pollfd; diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h index 285189454449..e0135e0adae0 100644 --- a/include/linux/resume_user_mode.h +++ b/include/linux/resume_user_mode.h @@ -6,6 +6,7 @@ #include <linux/sched.h> #include <linux/task_work.h> #include <linux/memcontrol.h> +#include <linux/rseq.h> #include <linux/blk-cgroup.h> /** @@ -55,7 +56,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs) } #endif - mem_cgroup_handle_over_high(); + mem_cgroup_handle_over_high(GFP_KERNEL); blkcg_maybe_throttle_current(); rseq_handle_notify_resume(NULL, regs); diff --git a/include/linux/rethook.h b/include/linux/rethook.h index 26b6f3c81a76..ba60962805f6 100644 --- a/include/linux/rethook.h +++ b/include/linux/rethook.h @@ -6,11 +6,10 @@ #define _LINUX_RETHOOK_H #include <linux/compiler.h> -#include <linux/freelist.h> +#include <linux/objpool.h> #include <linux/kallsyms.h> #include <linux/llist.h> #include <linux/rcupdate.h> -#include <linux/refcount.h> struct rethook_node; @@ -29,15 +28,18 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long, */ struct rethook { void *data; - rethook_handler_t handler; - struct freelist_head pool; - refcount_t ref; + /* + * To avoid sparse warnings, this uses a raw function pointer with + * __rcu, instead of rethook_handler_t. But this must be same as + * rethook_handler_t. + */ + void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *); + struct objpool_head pool; struct rcu_head rcu; }; /** * struct rethook_node - The rethook shadow-stack entry node. - * @freelist: The freelist, linked to struct rethook::pool. * @rcu: The rcu_head for deferred freeing. * @llist: The llist, linked to a struct task_struct::rethooks. * @rethook: The pointer to the struct rethook. @@ -48,20 +50,16 @@ struct rethook { * on each entry of the shadow stack. 
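The freelist-backed pool and refcount give way to an objpool, and (as the updated prototype below shows) rethook_alloc() now sizes and fills the pool itself. A sketch of how a user such as fprobe might allocate one; the node type and pool depth are illustrative:

        struct my_node {                        /* hypothetical user node */
                struct rethook_node node;       /* must sit at offset 0 */
                unsigned long entry_ip;
        };

        struct rethook *rh;

        rh = rethook_alloc(my_data, my_handler, sizeof(struct my_node), 16);
        if (IS_ERR_OR_NULL(rh))                 /* covers both error conventions */
                return -ENOMEM;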
*/ struct rethook_node { - union { - struct freelist_node freelist; - struct rcu_head rcu; - }; + struct rcu_head rcu; struct llist_node llist; struct rethook *rethook; unsigned long ret_addr; unsigned long frame; }; -struct rethook *rethook_alloc(void *data, rethook_handler_t handler); +struct rethook *rethook_alloc(void *data, rethook_handler_t handler, int size, int num); void rethook_stop(struct rethook *rh); void rethook_free(struct rethook *rh); -void rethook_add_node(struct rethook *rh, struct rethook_node *node); struct rethook_node *rethook_try_get(struct rethook *rh); void rethook_recycle(struct rethook_node *node); void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount); @@ -98,4 +96,3 @@ void rethook_flush_task(struct task_struct *tk); #endif #endif - diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h index 57467cbf4c5b..b6f3797277ff 100644 --- a/include/linux/rhashtable-types.h +++ b/include/linux/rhashtable-types.h @@ -12,7 +12,7 @@ #include <linux/atomic.h> #include <linux/compiler.h> #include <linux/mutex.h> -#include <linux/workqueue.h> +#include <linux/workqueue_types.h> struct rhash_head { struct rhash_head __rcu *next; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 782e14f62201..fa802db216f9 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -141,6 +141,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter); bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter); unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer); void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu); void ring_buffer_reset_online_cpus(struct trace_buffer *buffer); @@ -191,15 +192,24 @@ bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer); size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu); size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu); -void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu); -void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data); -int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page, +struct buffer_data_read_page; +struct buffer_data_read_page * +ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu); +void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, + struct buffer_data_read_page *page); +int ring_buffer_read_page(struct trace_buffer *buffer, + struct buffer_data_read_page *data_page, size_t len, int cpu, int full); +void *ring_buffer_read_page_data(struct buffer_data_read_page *page); struct trace_seq; int ring_buffer_print_entry_header(struct trace_seq *s); -int ring_buffer_print_page_header(struct trace_seq *s); +int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s); + +int ring_buffer_subbuf_order_get(struct trace_buffer *buffer); +int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order); +int ring_buffer_subbuf_size_get(struct trace_buffer *buffer); enum ring_buffer_flags { RB_FL_OVERWRITE = 1 << 0, diff --git a/include/linux/rmap.h b/include/linux/rmap.h index a3825ce81102..b7944a833668 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -121,6 +121,11 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma) down_write(&anon_vma->root->rwsem); } +static inline int anon_vma_trylock_write(struct anon_vma *anon_vma) +{ + return 
down_write_trylock(&anon_vma->root->rwsem); +} + static inline void anon_vma_unlock_write(struct anon_vma *anon_vma) { up_write(&anon_vma->root->rwsem); @@ -172,133 +177,323 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio); typedef int __bitwise rmap_t; /* - * No special request: if the page is a subpage of a compound page, it is - * mapped via a PTE. The mapped (sub)page is possibly shared between processes. + * No special request: A mapped anonymous (sub)page is possibly shared between + * processes. */ #define RMAP_NONE ((__force rmap_t)0) -/* The (sub)page is exclusive to a single process. */ +/* The anonymous (sub)page is exclusive to a single process. */ #define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0)) /* - * The compound page is not mapped via PTEs, but instead via a single PMD and - * should be accounted accordingly. + * Internally, we're using an enum to specify the granularity. We make the + * compiler emit specialized code for each granularity. */ -#define RMAP_COMPOUND ((__force rmap_t)BIT(1)) +enum rmap_level { + RMAP_LEVEL_PTE = 0, + RMAP_LEVEL_PMD, +}; + +static inline void __folio_rmap_sanity_checks(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) +{ + /* hugetlb folios are handled separately. */ + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + + /* + * TODO: we get driver-allocated folios that have nothing to do with + * the rmap using vm_insert_page(); therefore, we cannot assume that + * folio_test_large_rmappable() holds for large folios. We should + * handle any desired mapcount+stats accounting for these folios in + * VM_MIXEDMAP VMAs separately, and then sanity-check here that + * we really only get rmappable folios. + */ + + VM_WARN_ON_ONCE(nr_pages <= 0); + VM_WARN_ON_FOLIO(page_folio(page) != folio, folio); + VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio); + + switch (level) { + case RMAP_LEVEL_PTE: + break; + case RMAP_LEVEL_PMD: + /* + * We don't support folios larger than a single PMD yet. So + * when RMAP_LEVEL_PMD is set, we assume that we are creating + * a single "entire" mapping of the folio. 
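Callers now state the granularity explicitly through the _pte/_ptes/_pmd suffixed helpers declared below, rather than passing a compound flag. A hedged sketch of the typical dispatch; the map_entire condition is illustrative, not a fixed rule:

        /* Sketch: map a page-cache folio either by one PMD or by a PTE. */
        if (map_entire && folio_test_pmd_mappable(folio))
                folio_add_file_rmap_pmd(folio, &folio->page, vma);
        else
                folio_add_file_rmap_pte(folio, page, vma);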
+ */ + VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio); + VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); + break; + default: + VM_WARN_ON_ONCE(true); + } +} /* * rmap interfaces called when adding or removing pte of page */ -void page_move_anon_rmap(struct page *, struct vm_area_struct *); -void page_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long address, rmap_t flags); -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long address); +void folio_move_anon_rmap(struct folio *, struct vm_area_struct *); +void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *, unsigned long address, rmap_t flags); +#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \ + folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags) +void folio_add_anon_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *, unsigned long address, rmap_t flags); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); -void page_add_file_rmap(struct page *, struct vm_area_struct *, - bool compound); -void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr, - struct vm_area_struct *, bool compound); -void page_remove_rmap(struct page *, struct vm_area_struct *, - bool compound); - -void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, +void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *); +#define folio_add_file_rmap_pte(folio, page, vma) \ + folio_add_file_rmap_ptes(folio, page, 1, vma) +void folio_add_file_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *); +void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *); +#define folio_remove_rmap_pte(folio, page, vma) \ + folio_remove_rmap_ptes(folio, page, 1, vma) +void folio_remove_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *); + +void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address, rmap_t flags); -void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *, +void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); -static inline void __page_dup_rmap(struct page *page, bool compound) +/* See folio_try_dup_anon_rmap_*() */ +static inline int hugetlb_try_dup_anon_rmap(struct folio *folio, + struct vm_area_struct *vma) { - if (compound) { - struct folio *folio = (struct folio *)page; + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); - VM_BUG_ON_PAGE(compound && !PageHead(page), page); - atomic_inc(&folio->_entire_mapcount); - } else { - atomic_inc(&page->_mapcount); + if (PageAnonExclusive(&folio->page)) { + if (unlikely(folio_needs_cow_for_dma(vma, folio))) + return -EBUSY; + ClearPageAnonExclusive(&folio->page); } + atomic_inc(&folio->_entire_mapcount); + return 0; +} + +/* See folio_try_share_anon_rmap_*() */ +static inline int hugetlb_try_share_anon_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio); + + /* Paired with the memory barrier in try_grab_folio(). 
*/ + if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + smp_mb(); + + if (unlikely(folio_maybe_dma_pinned(folio))) + return -EBUSY; + ClearPageAnonExclusive(&folio->page); + + /* + * This is conceptually a smp_wmb() paired with the smp_rmb() in + * gup_must_unshare(). + */ + if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + smp_mb__after_atomic(); + return 0; +} + +static inline void hugetlb_add_file_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); + + atomic_inc(&folio->_entire_mapcount); +} + +static inline void hugetlb_remove_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + + atomic_dec(&folio->_entire_mapcount); } -static inline void page_dup_file_rmap(struct page *page, bool compound) +static __always_inline void __folio_dup_file_rmap(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) { - __page_dup_rmap(page, compound); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + switch (level) { + case RMAP_LEVEL_PTE: + do { + atomic_inc(&page->_mapcount); + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: + atomic_inc(&folio->_entire_mapcount); + break; + } } /** - * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped - * anonymous page - * @page: the page to duplicate the mapping for - * @compound: the page is mapped as compound or as a small page - * @vma: the source vma + * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio + * @folio: The folio to duplicate the mappings of + * @page: The first page to duplicate the mappings of + * @nr_pages: The number of pages of which the mapping will be duplicated * - * The caller needs to hold the PT lock and the vma->vma_mm->write_protect_seq. + * The page range of the folio is defined by [page, page + nr_pages) * - * Duplicating the mapping can only fail if the page may be pinned; device - * private pages cannot get pinned and consequently this function cannot fail. + * The caller needs to hold the page table lock. + */ +static inline void folio_dup_file_rmap_ptes(struct folio *folio, + struct page *page, int nr_pages) +{ + __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE); +} +#define folio_dup_file_rmap_pte(folio, page) \ + folio_dup_file_rmap_ptes(folio, page, 1) + +/** + * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio + * @folio: The folio to duplicate the mapping of + * @page: The first page to duplicate the mapping of * - * If duplicating the mapping succeeds, the page has to be mapped R/O into - * the parent and the child. It must *not* get mapped writable after this call. + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * - * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise. + * The caller needs to hold the page table lock. */ -static inline int page_try_dup_anon_rmap(struct page *page, bool compound, - struct vm_area_struct *vma) +static inline void folio_dup_file_rmap_pmd(struct folio *folio, + struct page *page) { - VM_BUG_ON_PAGE(!PageAnon(page), page); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE); +#else + WARN_ON_ONCE(true); +#endif +} - /* - * No need to check+clear for already shared pages, including KSM - * pages. 
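Ahead of the new __folio_try_dup_anon_rmap() body below, a sketch of how fork()-time PTE copying might use these helpers, loosely modelled on copy_present_pte(); the copy fallback is hypothetical:

        /* Sketch: duplicate one PTE mapping from parent to child at fork(). */
        if (folio_test_anon(folio)) {
                /* May fail if the folio could be pinned: copy, don't share. */
                if (folio_try_dup_anon_rmap_pte(folio, page, src_vma))
                        return copy_page_for_child(dst_vma, src_vma, addr); /* hypothetical */
        } else {
                folio_dup_file_rmap_pte(folio, page);
        }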
- */ - if (!PageAnonExclusive(page)) - goto dup; +static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *src_vma, + enum rmap_level level) +{ + bool maybe_pinned; + int i; + + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); /* - * If this page may have been pinned by the parent process, - * don't allow to duplicate the mapping but instead require to e.g., - * copy the page immediately for the child so that we'll always - * guarantee the pinned page won't be randomly replaced in the + * If this folio may have been pinned by the parent process, + * don't allow to duplicate the mappings but instead require to e.g., + * copy the subpage immediately for the child so that we'll always + * guarantee the pinned folio won't be randomly replaced in the * future on write faults. */ - if (likely(!is_device_private_page(page) && - unlikely(page_needs_cow_for_dma(vma, page)))) - return -EBUSY; + maybe_pinned = likely(!folio_is_device_private(folio)) && + unlikely(folio_needs_cow_for_dma(src_vma, folio)); - ClearPageAnonExclusive(page); /* - * It's okay to share the anon page between both processes, mapping - * the page R/O into both processes. + * No need to check+clear for already shared PTEs/PMDs of the + * folio. But if any page is PageAnonExclusive, we must fallback to + * copying if the folio maybe pinned. */ -dup: - __page_dup_rmap(page, compound); + switch (level) { + case RMAP_LEVEL_PTE: + if (unlikely(maybe_pinned)) { + for (i = 0; i < nr_pages; i++) + if (PageAnonExclusive(page + i)) + return -EBUSY; + } + do { + if (PageAnonExclusive(page)) + ClearPageAnonExclusive(page); + atomic_inc(&page->_mapcount); + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: + if (PageAnonExclusive(page)) { + if (unlikely(maybe_pinned)) + return -EBUSY; + ClearPageAnonExclusive(page); + } + atomic_inc(&folio->_entire_mapcount); + break; + } return 0; } /** - * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly - * shared to prepare for KSM or temporary unmapping - * @page: the exclusive anonymous page to try marking possibly shared + * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range + * of a folio + * @folio: The folio to duplicate the mappings of + * @page: The first page to duplicate the mappings of + * @nr_pages: The number of pages of which the mapping will be duplicated + * @src_vma: The vm area from which the mappings are duplicated + * + * The page range of the folio is defined by [page, page + nr_pages) * - * The caller needs to hold the PT lock and has to have the page table entry - * cleared/invalidated. + * The caller needs to hold the page table lock and the + * vma->vma_mm->write_protect_seq. * - * This is similar to page_try_dup_anon_rmap(), however, not used during fork() - * to duplicate a mapping, but instead to prepare for KSM or temporarily - * unmapping a page (swap, migration) via page_remove_rmap(). + * Duplicating the mappings can only fail if the folio may be pinned; device + * private folios cannot get pinned and consequently this function cannot fail + * for them. * - * Marking the page shared can only fail if the page may be pinned; device - * private pages cannot get pinned and consequently this function cannot fail. + * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in + * the parent and the child. 
They must *not* be writable after this call + * succeeded. * - * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY - * otherwise. + * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise. */ -static inline int page_try_share_anon_rmap(struct page *page) +static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *src_vma) { - VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page); + return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma, + RMAP_LEVEL_PTE); +} +#define folio_try_dup_anon_rmap_pte(folio, page, vma) \ + folio_try_dup_anon_rmap_ptes(folio, page, 1, vma) - /* device private pages cannot get pinned via GUP. */ - if (unlikely(is_device_private_page(page))) { +/** + * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range + * of a folio + * @folio: The folio to duplicate the mapping of + * @page: The first page to duplicate the mapping of + * @src_vma: The vm area from which the mapping is duplicated + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock and the + * vma->vma_mm->write_protect_seq. + * + * Duplicating the mapping can only fail if the folio may be pinned; device + * private folios cannot get pinned and consequently this function cannot fail + * for them. + * + * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in + * the parent and the child. They must *not* be writable after this call + * succeeded. + * + * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise. + */ +static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio, + struct page *page, struct vm_area_struct *src_vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma, + RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); + return -EBUSY; +#endif +} + +static __always_inline int __folio_try_share_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) +{ + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + /* device private folios cannot get pinned via GUP. */ + if (unlikely(folio_is_device_private(folio))) { ClearPageAnonExclusive(page); return 0; } @@ -349,7 +544,7 @@ static inline int page_try_share_anon_rmap(struct page *page) if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) smp_mb(); - if (unlikely(page_maybe_dma_pinned(page))) + if (unlikely(folio_maybe_dma_pinned(folio))) return -EBUSY; ClearPageAnonExclusive(page); @@ -362,6 +557,68 @@ static inline int page_try_share_anon_rmap(struct page *page) return 0; } +/** + * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page + * mapped by a PTE possibly shared to prepare + * for KSM or temporary unmapping + * @folio: The folio to share a mapping of + * @page: The mapped exclusive page + * + * The caller needs to hold the page table lock and has to have the page table + * entries cleared/invalidated. + * + * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during + * fork() to duplicate mappings, but instead to prepare for KSM or temporarily + * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte(). 
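A hedged sketch of the unmap path this documents, loosely following try_to_unmap_one(): the PTE is cleared first, and restored if marking the page possibly-shared fails because it may be pinned.

        /* Sketch: prepare an exclusive anon page for swap-out. */
        pteval = ptep_clear_flush(vma, address, pvmw.pte);
        if (folio_try_share_anon_rmap_pte(folio, subpage)) {
                /* Possibly pinned: restore the PTE and abort the unmap. */
                set_pte_at(mm, address, pvmw.pte, pteval);
                return false;
        }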
+ * + * Marking the mapped page shared can only fail if the folio maybe pinned; + * device private folios cannot get pinned and consequently this function cannot + * fail. + * + * Returns 0 if marking the mapped page possibly shared succeeded. Returns + * -EBUSY otherwise. + */ +static inline int folio_try_share_anon_rmap_pte(struct folio *folio, + struct page *page) +{ + return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE); +} + +/** + * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page + * range mapped by a PMD possibly shared to + * prepare for temporary unmapping + * @folio: The folio to share the mapping of + * @page: The first page to share the mapping of + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock and has to have the page table + * entries cleared/invalidated. + * + * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during + * fork() to duplicate a mapping, but instead to prepare for temporarily + * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd(). + * + * Marking the mapped pages shared can only fail if the folio maybe pinned; + * device private folios cannot get pinned and consequently this function cannot + * fail. + * + * Returns 0 if marking the mapped pages possibly shared succeeded. Returns + * -EBUSY otherwise. + */ +static inline int folio_try_share_anon_rmap_pmd(struct folio *folio, + struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR, + RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); + return -EBUSY; +#endif +} + /* * Called from mm/vmscan.c to handle paging out */ @@ -479,7 +736,6 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, #define anon_vma_init() do {} while (0) #define anon_vma_prepare(vma) (0) -#define anon_vma_link(vma) do {} while (0) static inline int folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 523c98b96cb4..90d8e4475f80 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -64,12 +64,14 @@ struct rpmsg_device { }; typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32); +typedef int (*rpmsg_flowcontrol_cb_t)(struct rpmsg_device *, void *, bool); /** * struct rpmsg_endpoint - binds a local rpmsg address to its user * @rpdev: rpmsg channel device * @refcount: when this drops to zero, the ept is deallocated * @cb: rx callback handler + * @flow_cb: remote flow control callback handler * @cb_lock: must be taken before accessing/changing @cb * @addr: local rpmsg address * @priv: private data for the driver's use @@ -92,6 +94,7 @@ struct rpmsg_endpoint { struct rpmsg_device *rpdev; struct kref refcount; rpmsg_rx_cb_t cb; + rpmsg_flowcontrol_cb_t flow_cb; struct mutex cb_lock; u32 addr; void *priv; @@ -106,6 +109,7 @@ struct rpmsg_endpoint { * @probe: invoked when a matching rpmsg channel (i.e. 
device) is found * @remove: invoked when the rpmsg channel is removed * @callback: invoked when an inbound message is received on the channel + * @flowcontrol: invoked when remote side flow control request is received */ struct rpmsg_driver { struct device_driver drv; @@ -113,6 +117,7 @@ struct rpmsg_driver { int (*probe)(struct rpmsg_device *dev); void (*remove)(struct rpmsg_device *dev); int (*callback)(struct rpmsg_device *, void *, int, void *, u32); + int (*flowcontrol)(struct rpmsg_device *, void *, bool); }; static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val) @@ -192,6 +197,8 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept); +int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst); + #else static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev, @@ -316,6 +323,14 @@ static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept) return -ENXIO; } +static inline int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + #endif /* IS_ENABLED(CONFIG_RPMSG) */ /* use a macro to avoid include chaining to get THIS_MODULE */ diff --git a/include/linux/rseq.h b/include/linux/rseq.h new file mode 100644 index 000000000000..bc8af3eb5598 --- /dev/null +++ b/include/linux/rseq.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _LINUX_RSEQ_H +#define _LINUX_RSEQ_H + +#ifdef CONFIG_RSEQ + +#include <linux/preempt.h> +#include <linux/sched.h> + +/* + * Map the event mask on the user-space ABI enum rseq_cs_flags + * for direct mask checks. + */ +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, + RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, + RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, +}; + +enum rseq_event_mask { + RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), + RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), + RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), +}; + +static inline void rseq_set_notify_resume(struct task_struct *t) +{ + if (t->rseq) + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); +} + +void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); + +static inline void rseq_handle_notify_resume(struct ksignal *ksig, + struct pt_regs *regs) +{ + if (current->rseq) + __rseq_handle_notify_resume(ksig, regs); +} + +static inline void rseq_signal_deliver(struct ksignal *ksig, + struct pt_regs *regs) +{ + preempt_disable(); + __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); + preempt_enable(); + rseq_handle_notify_resume(ksig, regs); +} + +/* rseq_preempt() requires preemption to be disabled. */ +static inline void rseq_preempt(struct task_struct *t) +{ + __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); + rseq_set_notify_resume(t); +} + +/* rseq_migrate() requires preemption to be disabled. */ +static inline void rseq_migrate(struct task_struct *t) +{ + __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); + rseq_set_notify_resume(t); +} + +/* + * If parent process has a registered restartable sequences area, the + * child inherits. Unregister rseq for a clone with CLONE_VM set. 
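Because a CLONE_VM child starts with rseq unregistered, each new thread must register its own area from user space. A minimal userspace sketch of that registration; RSEQ_SIG is an application-chosen signature (the value shown is the one glibc happens to use), and recent glibc performs this automatically:

        #include <linux/rseq.h>         /* uapi struct rseq */
        #include <sys/syscall.h>
        #include <unistd.h>

        #define RSEQ_SIG 0x53053053     /* application-chosen signature */

        static __thread struct rseq rs __attribute__((aligned(32)));

        static int register_thread_rseq(void)
        {
                /* rseq(2): area, length, flags, signature. */
                return syscall(__NR_rseq, &rs, sizeof(rs), 0, RSEQ_SIG);
        }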
+ */ +static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) +{ + if (clone_flags & CLONE_VM) { + t->rseq = NULL; + t->rseq_len = 0; + t->rseq_sig = 0; + t->rseq_event_mask = 0; + } else { + t->rseq = current->rseq; + t->rseq_len = current->rseq_len; + t->rseq_sig = current->rseq_sig; + t->rseq_event_mask = current->rseq_event_mask; + } +} + +static inline void rseq_execve(struct task_struct *t) +{ + t->rseq = NULL; + t->rseq_len = 0; + t->rseq_sig = 0; + t->rseq_event_mask = 0; +} + +#else + +static inline void rseq_set_notify_resume(struct task_struct *t) +{ +} +static inline void rseq_handle_notify_resume(struct ksignal *ksig, + struct pt_regs *regs) +{ +} +static inline void rseq_signal_deliver(struct ksignal *ksig, + struct pt_regs *regs) +{ +} +static inline void rseq_preempt(struct task_struct *t) +{ +} +static inline void rseq_migrate(struct task_struct *t) +{ +} +static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) +{ +} +static inline void rseq_execve(struct task_struct *t) +{ +} + +#endif + +#ifdef CONFIG_DEBUG_RSEQ + +void rseq_syscall(struct pt_regs *regs); + +#else + +static inline void rseq_syscall(struct pt_regs *regs) +{ +} + +#endif + +#endif /* _LINUX_RSEQ_H */ diff --git a/include/linux/rslib.h b/include/linux/rslib.h index 238bb85243d3..a04dacbdc8ae 100644 --- a/include/linux/rslib.h +++ b/include/linux/rslib.h @@ -10,7 +10,6 @@ #ifndef _RSLIB_H_ #define _RSLIB_H_ -#include <linux/list.h> #include <linux/types.h> /* for gfp_t */ #include <linux/gfp.h> /* for GFP_KERNEL */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 1fd9c6a21ebe..5f8e438a0312 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -146,6 +146,7 @@ struct rtc_device { time64_t range_min; timeu64_t range_max; + timeu64_t alarm_offset_max; time64_t start_secs; time64_t offset_secs; bool set_start_time; @@ -224,6 +225,23 @@ static inline bool is_leap_year(unsigned int year) return (!(year % 4) && (year % 100)) || !(year % 400); } +/** + * rtc_bound_alarmtime() - Return alarm time bound by rtc limit + * @rtc: Pointer to rtc device structure + * @requested: Requested alarm timeout + * + * Return: Alarm timeout bound by maximum alarm time supported by rtc. + */ +static inline ktime_t rtc_bound_alarmtime(struct rtc_device *rtc, + ktime_t requested) +{ + if (rtc->alarm_offset_max && + rtc->alarm_offset_max * MSEC_PER_SEC < ktime_to_ms(requested)) + return ms_to_ktime(rtc->alarm_offset_max * MSEC_PER_SEC); + + return requested; +} + #define devm_rtc_register_device(device) \ __devm_rtc_register_device(THIS_MODULE, device) diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 3d6cf306cd55..410529fca18b 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -10,6 +10,13 @@ #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); + +static inline int rtnetlink_maybe_send(struct sk_buff *skb, struct net *net, + u32 pid, u32 group, int echo) +{ + return !skb ? 
0 : rtnetlink_send(skb, net, pid, group, echo); +} + extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, const struct nlmsghdr *nlh, gfp_t flags); @@ -72,6 +79,18 @@ static inline bool lockdep_rtnl_is_held(void) #define rtnl_dereference(p) \ rcu_dereference_protected(p, lockdep_rtnl_is_held()) +/** + * rcu_replace_pointer_rtnl - replace an RCU pointer under rtnl_lock, returning + * its old value + * @rp: RCU pointer, whose value is returned + * @p: regular pointer + * + * Perform a replacement under rtnl_lock, where @rp is an RCU-annotated + * pointer. The old value of @rp is returned, and @rp is set to @p + */ +#define rcu_replace_pointer_rtnl(rp, p) \ + rcu_replace_pointer(rp, p, lockdep_rtnl_is_held()) + static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) { return rtnl_dereference(dev->ingress_queue); @@ -130,4 +149,26 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, extern void rtnl_offload_xstats_notify(struct net_device *dev); +static inline int rtnl_has_listeners(const struct net *net, u32 group) +{ + struct sock *rtnl = net->rtnl; + + return netlink_has_listeners(rtnl, group); +} + +/** + * rtnl_notify_needed - check if notification is needed + * @net: Pointer to the net namespace + * @nlflags: netlink ingress message flags + * @group: rtnl group + * + * Based on the ingress message flags and rtnl group, returns true + * if a notification is needed, false otherwise. + */ +static inline bool +rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group) +{ + return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group); +} + #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 534038d962e4..4612ef09a0c7 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -60,6 +60,7 @@ #define SD_EXIST (1 << 16) #define DELINK_INT GPIO0_INT #define MS_OC_INT (1 << 23) +#define SD_OVP_INT (1 << 23) #define SD_OC_INT (1 << 22) #define CARD_INT (XD_INT | MS_INT | SD_INT) @@ -80,6 +81,7 @@ #define OC_INT_EN (1 << 23) #define DELINK_INT_EN GPIO0_INT_EN #define MS_OC_INT_EN (1 << 23) +#define SD_OVP_INT_EN (1 << 23) #define SD_OC_INT_EN (1 << 22) #define RTSX_DUM_REG 0x1C @@ -583,6 +585,7 @@ #define OBFF_DISABLE 0x00 #define CDRESUMECTL 0xFE52 +#define CDGW 0xFE53 #define WAKE_SEL_CTL 0xFE54 #define PCLK_CTL 0xFE55 #define PCLK_MODE_SEL 0x20 @@ -764,6 +767,9 @@ #define SD_VIO_LDO_1V8 0x40 #define SD_VIO_LDO_3V3 0x70 +#define RTS5264_AUTOLOAD_CFG2 0xFF7D +#define RTS5264_CHIP_RST_N_SEL (1 << 6) + #define RTS5260_AUTOLOAD_CFG4 0xFF7F #define RTS5260_MIMO_DISABLE 0x8A /*RTS5261*/ @@ -1261,6 +1267,7 @@ struct rtsx_pcr { u8 dma_error_count; u8 ocp_stat; u8 ocp_stat2; + u8 ovp_stat; u8 rtd3_en; }; @@ -1271,6 +1278,7 @@ struct rtsx_pcr { #define PID_5260 0x5260 #define PID_5261 0x5261 #define PID_5228 0x5228 +#define PID_5264 0x5264 #define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) #define PCI_VID(pcr) ((pcr)->pci->vendor) diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 1dd530ce8b45..9c29689ff505 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem); extern void up_write(struct rw_semaphore *sem); DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T)) -DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T)) - -DEFINE_FREE(up_read, struct 
rw_semaphore *, if (_T) up_read(_T)) -DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T)) +DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T)) +DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0) +DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T)) +DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T)) /* * downgrade write lock to read lock diff --git a/include/linux/sched.h b/include/linux/sched.h index 77f01ac385f7..ffe8f618ab86 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -10,33 +10,41 @@ #include <uapi/linux/sched.h> #include <asm/current.h> - -#include <linux/pid.h> -#include <linux/sem.h> +#include <asm/processor.h> +#include <linux/thread_info.h> +#include <linux/preempt.h> +#include <linux/cpumask.h> + +#include <linux/cache.h> +#include <linux/irqflags_types.h> +#include <linux/smp_types.h> +#include <linux/pid_types.h> +#include <linux/sem_types.h> #include <linux/shm.h> #include <linux/kmsan_types.h> -#include <linux/mutex.h> -#include <linux/plist.h> -#include <linux/hrtimer.h> -#include <linux/irqflags.h> -#include <linux/seccomp.h> -#include <linux/nodemask.h> -#include <linux/rcupdate.h> -#include <linux/refcount.h> +#include <linux/mutex_types.h> +#include <linux/plist_types.h> +#include <linux/hrtimer_types.h> +#include <linux/timer_types.h> +#include <linux/seccomp_types.h> +#include <linux/nodemask_types.h> +#include <linux/refcount_types.h> #include <linux/resource.h> #include <linux/latencytop.h> #include <linux/sched/prio.h> #include <linux/sched/types.h> #include <linux/signal_types.h> -#include <linux/syscall_user_dispatch.h> +#include <linux/syscall_user_dispatch_types.h> #include <linux/mm_types_task.h> #include <linux/task_io_accounting.h> -#include <linux/posix-timers.h> -#include <linux/rseq.h> -#include <linux/seqlock.h> +#include <linux/posix-timers_types.h> +#include <linux/restart_block.h> +#include <uapi/linux/rseq.h> +#include <linux/seqlock_types.h> #include <linux/kcsan.h> #include <linux/rv.h> #include <linux/livepatch_sched.h> +#include <linux/uidgid_types.h> #include <asm/kmap_size.h> /* task_struct member predeclarations (sorted alphabetically): */ @@ -63,12 +71,13 @@ struct robust_list_head; struct root_domain; struct rq; struct sched_attr; -struct sched_param; +struct sched_dl_entity; struct seq_file; struct sighand_struct; struct signal_struct; struct task_delay_info; struct task_group; +struct task_struct; struct user_event_mm; /* @@ -370,6 +379,10 @@ extern struct root_domain def_root_domain; extern struct mutex sched_domains_mutex; #endif +struct sched_param { + int sched_priority; +}; + struct sched_info { #ifdef CONFIG_SCHED_INFO /* Cumulative counters: */ @@ -410,42 +423,6 @@ struct load_weight { u32 inv_weight; }; -/** - * struct util_est - Estimation utilization of FAIR tasks - * @enqueued: instantaneous estimated utilization of a task/cpu - * @ewma: the Exponential Weighted Moving Average (EWMA) - * utilization of a task - * - * Support data structure to track an Exponential Weighted Moving Average - * (EWMA) of a FAIR task's utilization. New samples are added to the moving - * average each time a task completes an activation. Sample's weight is chosen - * so that the EWMA will be relatively insensitive to transient changes to the - * task's workload. 
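The struct util_est documentation removed here is superseded: the EWMA and its synchronization flag now share a single unsigned int, as the replacement comment further below spells out. A sketch of reading it with the flag masked off:

        /* Sketch: a task's estimated utilization, minus the MSB sync flag. */
        unsigned int ue = READ_ONCE(p->se.avg.util_est);
        unsigned int util = ue & ~UTIL_AVG_UNCHANGED;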
- * - * The enqueued attribute has a slightly different meaning for tasks and cpus: - * - task: the task's util_avg at last task dequeue time - * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU - * Thus, the util_est.enqueued of a task represents the contribution on the - * estimated utilization of the CPU where that task is currently enqueued. - * - * Only for tasks we track a moving average of the past instantaneous - * estimated utilization. This allows to absorb sporadic drops in utilization - * of an otherwise almost periodic task. - * - * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg - * updates. When a task is dequeued, its util_est should not be updated if its - * util_avg has not been updated in the meantime. - * This information is mapped into the MSB bit of util_est.enqueued at dequeue - * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg - * for a task) it is safe to use MSB. - */ -struct util_est { - unsigned int enqueued; - unsigned int ewma; -#define UTIL_EST_WEIGHT_SHIFT 2 -#define UTIL_AVG_UNCHANGED 0x80000000 -} __attribute__((__aligned__(sizeof(u64)))); - /* * The load/runnable/util_avg accumulates an infinite geometric series * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). @@ -500,9 +477,20 @@ struct sched_avg { unsigned long load_avg; unsigned long runnable_avg; unsigned long util_avg; - struct util_est util_est; + unsigned int util_est; } ____cacheline_aligned; +/* + * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg + * updates. When a task is dequeued, its util_est should not be updated if its + * util_avg has not been updated in the meantime. + * This information is mapped into the MSB bit of util_est at dequeue time. + * Since max value of util_est for a task is 1024 (PELT util_avg for a task) + * it is safe to use MSB. + */ +#define UTIL_EST_WEIGHT_SHIFT 2 +#define UTIL_AVG_UNCHANGED 0x80000000 + struct sched_statistics { #ifdef CONFIG_SCHEDSTATS u64 wait_start; @@ -520,7 +508,7 @@ struct sched_statistics { u64 block_max; s64 sum_block_runtime; - u64 exec_max; + s64 exec_max; u64 slice_max; u64 nr_migrations_cold; @@ -550,7 +538,7 @@ struct sched_entity { struct load_weight load; struct rb_node run_node; u64 deadline; - u64 min_deadline; + u64 min_vruntime; struct list_head group_node; unsigned int on_rq; @@ -604,6 +592,9 @@ struct sched_rt_entity { #endif } __randomize_layout; +typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); +typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *); + struct sched_dl_entity { struct rb_node rb_node; @@ -651,6 +642,7 @@ struct sched_dl_entity { unsigned int dl_yielded : 1; unsigned int dl_non_contending : 1; unsigned int dl_overrun : 1; + unsigned int dl_server : 1; /* * Bandwidth enforcement timer. Each -deadline task has its @@ -665,7 +657,20 @@ struct sched_dl_entity { * timer is needed to decrease the active utilization at the correct * time. */ - struct hrtimer inactive_timer; + struct hrtimer inactive_timer; + + /* + * Bits for DL-server functionality. Also see the comment near + * dl_server_update(). + * + * @rq the runqueue this server is for + * + * @server_has_tasks() returns true if @server_pick return a + * runnable task. 
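A sketch of how a DL server for another scheduling class might wire these up at init time; the callback names are illustrative, and the real implementations live in the scheduler core, not in this header:

        /* Sketch: turn a sched_dl_entity into a server for another class. */
        dl_se->rq = rq;
        dl_se->server_has_tasks = fair_server_has_tasks;        /* hypothetical */
        dl_se->server_pick = fair_server_pick;                  /* hypothetical */
        dl_se->dl_server = 1;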
+ */ + struct rq *rq; + dl_server_has_tasks_f server_has_tasks; + dl_server_pick_f server_pick; #ifdef CONFIG_RT_MUTEXES /* @@ -750,10 +755,8 @@ struct task_struct { #endif unsigned int __state; -#ifdef CONFIG_PREEMPT_RT /* saved state for "spinlock sleepers" */ unsigned int saved_state; -#endif /* * This begins the randomizable portion of task_struct. Only @@ -794,6 +797,7 @@ struct task_struct { struct sched_entity se; struct sched_rt_entity rt; struct sched_dl_entity dl; + struct sched_dl_entity *dl_server; const struct sched_class *sched_class; #ifdef CONFIG_SCHED_CORE @@ -875,6 +879,7 @@ struct task_struct { struct mm_struct *mm; struct mm_struct *active_mm; + struct address_space *faults_disabled_mapping; int exit_state; int exit_code; @@ -911,8 +916,11 @@ struct task_struct { * ->sched_remote_wakeup gets used, so it can be in this word. */ unsigned sched_remote_wakeup:1; +#ifdef CONFIG_RT_MUTEXES + unsigned sched_rt_mutex:1; +#endif - /* Bit to tell LSMs we're in execve(): */ + /* Bit to tell TOMOYO we're in execve(): */ unsigned in_execve:1; unsigned in_iowait:1; #ifndef TIF_RESTORE_SIGMASK @@ -949,7 +957,7 @@ struct task_struct { /* Recursion prevention for eventfd_signal() */ unsigned in_eventfd:1; #endif -#ifdef CONFIG_IOMMU_SVA +#ifdef CONFIG_ARCH_HAS_CPU_PASID unsigned pasid_activated:1; #endif #ifdef CONFIG_CPU_SUP_INTEL @@ -1002,7 +1010,6 @@ struct task_struct { /* PID/PID hash table linkage. */ struct pid *thread_pid; struct hlist_node pid_links[PIDTYPE_MAX]; - struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; @@ -1443,6 +1450,10 @@ struct task_struct { struct mem_cgroup *active_memcg; #endif +#ifdef CONFIG_MEMCG_KMEM + struct obj_cgroup *objcg; +#endif + #ifdef CONFIG_BLK_CGROUP struct gendisk *throttle_disk; #endif @@ -1553,114 +1564,6 @@ struct task_struct { */ }; -static inline struct pid *task_pid(struct task_struct *task) -{ - return task->thread_pid; -} - -/* - * the helpers to get the task's different pids as they are seen - * from various namespaces - * - * task_xid_nr() : global id, i.e. the id seen from the init namespace; - * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of - * current. - * task_xid_nr_ns() : id seen from the ns specified; - * - * see also pid_nr() etc in include/linux/pid.h - */ -pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); - -static inline pid_t task_pid_nr(struct task_struct *tsk) -{ - return tsk->pid; -} - -static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); -} - -static inline pid_t task_pid_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); -} - - -static inline pid_t task_tgid_nr(struct task_struct *tsk) -{ - return tsk->tgid; -} - -/** - * pid_alive - check that a task structure is not stale - * @p: Task structure to be checked. - * - * Test if a process is not yet dead (at most zombie state) - * If pid_alive fails, then pointers within the task structure - * can be stale and must not be dereferenced. - * - * Return: 1 if the process is alive. 0 otherwise. 
- */ -static inline int pid_alive(const struct task_struct *p) -{ - return p->thread_pid != NULL; -} - -static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); -} - -static inline pid_t task_pgrp_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); -} - - -static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); -} - -static inline pid_t task_session_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); -} - -static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); -} - -static inline pid_t task_tgid_vnr(struct task_struct *tsk) -{ - return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); -} - -static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) -{ - pid_t pid = 0; - - rcu_read_lock(); - if (pid_alive(tsk)) - pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); - rcu_read_unlock(); - - return pid; -} - -static inline pid_t task_ppid_nr(const struct task_struct *tsk) -{ - return task_ppid_nr_ns(tsk, &init_pid_ns); -} - -/* Obsolete, do not use: */ -static inline pid_t task_pgrp_nr(struct task_struct *tsk) -{ - return task_pgrp_nr_ns(tsk, &init_pid_ns); -} - #define TASK_REPORT_IDLE (TASK_REPORT + 1) #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) @@ -1704,20 +1607,6 @@ static inline char task_state_to_char(struct task_struct *tsk) return task_index_to_char(task_state_index(tsk)); } -/** - * is_global_init - check if a task structure is init. Since init - * is free to have sub-threads we need to check tgid. - * @tsk: Task structure to be checked. - * - * Check if a task structure is the first user space task the kernel created. - * - * Return: 1 if the task structure is init. 0 otherwise. - */ -static inline int is_global_init(struct task_struct *tsk) -{ - return task_tgid_nr(tsk) == 1; -} - extern struct pid *cad_pid; /* @@ -1947,9 +1836,7 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p); void yield(void); union thread_union { -#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK struct task_struct task; -#endif #ifndef CONFIG_THREAD_INFO_IN_TASK struct thread_info thread_info; #endif @@ -2169,15 +2056,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock); __cond_resched_rwlock_write(lock); \ }) -static inline void cond_resched_rcu(void) -{ -#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) - rcu_read_unlock(); - cond_resched(); - rcu_read_lock(); -#endif -} - #ifdef CONFIG_PREEMPT_DYNAMIC extern bool preempt_model_none(void); @@ -2219,37 +2097,6 @@ static inline bool preempt_model_preemptible(void) return preempt_model_full() || preempt_model_rt(); } -/* - * Does a critical section need to be broken due to another - * task waiting?: (technically does not depend on CONFIG_PREEMPTION, - * but a general need for low latency) - */ -static inline int spin_needbreak(spinlock_t *lock) -{ -#ifdef CONFIG_PREEMPTION - return spin_is_contended(lock); -#else - return 0; -#endif -} - -/* - * Check if a rwlock is contended. - * Returns non-zero if there is another task waiting on the rwlock. - * Returns zero if the lock is not contended or the system / underlying - * rwlock implementation does not support contention detection. - * Technically does not depend on CONFIG_PREEMPTION, but a general need - * for low latency. 
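These helpers appear to move to <linux/spinlock.h> (note the include of it added further down in sched.h). For context, the lock-break pattern they serve, as a hedged sketch with hypothetical work functions:

        /* Sketch: drop a contended lock periodically during a long scan. */
        spin_lock(&q->lock);
        while (more_work(q)) {                  /* hypothetical work loop */
                process_one(q);
                if (spin_needbreak(&q->lock)) {
                        spin_unlock(&q->lock);
                        cond_resched();
                        spin_lock(&q->lock);
                }
        }
        spin_unlock(&q->lock);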
- */ -static inline int rwlock_needbreak(rwlock_t *lock) -{ -#ifdef CONFIG_PREEMPTION - return rwlock_is_contended(lock); -#else - return 0; -#endif -} - static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); @@ -2284,6 +2131,8 @@ extern bool sched_task_on_rq(struct task_struct *p); extern unsigned long get_wchan(struct task_struct *p); extern struct task_struct *cpu_curr_snapshot(int cpu); +#include <linux/spinlock.h> + /* * In order to reduce various lock holder preemption latencies provide an * interface to see if a vCPU is currently running or not. @@ -2320,129 +2169,6 @@ static inline bool owner_on_cpu(struct task_struct *owner) unsigned long sched_cpu_util(int cpu); #endif /* CONFIG_SMP */ -#ifdef CONFIG_RSEQ - -/* - * Map the event mask on the user-space ABI enum rseq_cs_flags - * for direct mask checks. - */ -enum rseq_event_mask_bits { - RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, - RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, - RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, -}; - -enum rseq_event_mask { - RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), - RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), - RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), -}; - -static inline void rseq_set_notify_resume(struct task_struct *t) -{ - if (t->rseq) - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); -} - -void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); - -static inline void rseq_handle_notify_resume(struct ksignal *ksig, - struct pt_regs *regs) -{ - if (current->rseq) - __rseq_handle_notify_resume(ksig, regs); -} - -static inline void rseq_signal_deliver(struct ksignal *ksig, - struct pt_regs *regs) -{ - preempt_disable(); - __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); - preempt_enable(); - rseq_handle_notify_resume(ksig, regs); -} - -/* rseq_preempt() requires preemption to be disabled. */ -static inline void rseq_preempt(struct task_struct *t) -{ - __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); - rseq_set_notify_resume(t); -} - -/* rseq_migrate() requires preemption to be disabled. */ -static inline void rseq_migrate(struct task_struct *t) -{ - __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); - rseq_set_notify_resume(t); -} - -/* - * If parent process has a registered restartable sequences area, the - * child inherits. Unregister rseq for a clone with CLONE_VM set. 
- */ -static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) -{ - if (clone_flags & CLONE_VM) { - t->rseq = NULL; - t->rseq_len = 0; - t->rseq_sig = 0; - t->rseq_event_mask = 0; - } else { - t->rseq = current->rseq; - t->rseq_len = current->rseq_len; - t->rseq_sig = current->rseq_sig; - t->rseq_event_mask = current->rseq_event_mask; - } -} - -static inline void rseq_execve(struct task_struct *t) -{ - t->rseq = NULL; - t->rseq_len = 0; - t->rseq_sig = 0; - t->rseq_event_mask = 0; -} - -#else - -static inline void rseq_set_notify_resume(struct task_struct *t) -{ -} -static inline void rseq_handle_notify_resume(struct ksignal *ksig, - struct pt_regs *regs) -{ -} -static inline void rseq_signal_deliver(struct ksignal *ksig, - struct pt_regs *regs) -{ -} -static inline void rseq_preempt(struct task_struct *t) -{ -} -static inline void rseq_migrate(struct task_struct *t) -{ -} -static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) -{ -} -static inline void rseq_execve(struct task_struct *t) -{ -} - -#endif - -#ifdef CONFIG_DEBUG_RSEQ - -void rseq_syscall(struct pt_regs *regs); - -#else - -static inline void rseq_syscall(struct pt_regs *regs) -{ -} - -#endif - #ifdef CONFIG_SCHED_CORE extern void sched_core_free(struct task_struct *tsk); extern void sched_core_fork(struct task_struct *p); diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 0ee96ea7a0e9..02f5090ffea2 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ +#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */ #define MMF_MULTIPROCESS 26 /* mm is shared between processes */ /* @@ -85,10 +86,22 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_HAS_MDWE 28 #define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE) -#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) + +#define MMF_HAS_MDWE_NO_INHERIT 29 + +#define MMF_VM_MERGE_ANY 30 +#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY) #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ - MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK) + MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\ + MMF_VM_MERGE_ANY_MASK) + +static inline unsigned long mmf_init_flags(unsigned long flags) +{ + if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT)) + flags &= ~((1UL << MMF_HAS_MDWE) | + (1UL << MMF_HAS_MDWE_NO_INHERIT)); + return flags & MMF_INIT_MASK; +} -#define MMF_VM_MERGE_ANY 29 #endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 7c83d4d5a971..df3aca89d4f5 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,4 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_DEADLINE_H +#define _LINUX_SCHED_DEADLINE_H /* * SCHED_DEADLINE tasks has negative priorities, reflecting @@ -34,3 +36,5 @@ extern void dl_add_task_root_domain(struct task_struct *p); extern void dl_clear_root_domain(struct root_domain *rd); #endif /* CONFIG_SMP */ + +#endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index fe1a46f30d24..2b461129d1fa 100644 --- a/include/linux/sched/isolation.h +++ 
b/include/linux/sched/isolation.h @@ -2,6 +2,7 @@ #define _LINUX_SCHED_ISOLATION_H #include <linux/cpumask.h> +#include <linux/cpuset.h> #include <linux/init.h> #include <linux/tick.h> @@ -67,7 +68,8 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type) static inline bool cpu_is_isolated(int cpu) { return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) || - !housekeeping_test_cpu(cpu, HK_TYPE_TICK); + !housekeeping_test_cpu(cpu, HK_TYPE_TICK) || + cpuset_cpu_is_isolated(cpu); } #endif /* _LINUX_SCHED_ISOLATION_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 8d89c8c4fac1..9a19f1b42f64 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -403,6 +403,10 @@ DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg); * __GFP_ACCOUNT allocations till the end of the scope will be charged to the * given memcg. * + * Please, make sure that caller has a reference to the passed memcg structure, + * so its lifetime is guaranteed to exceed the scope between two + * set_active_memcg() calls. + * * NOTE: This function can nest. Users must save the return value and * reset the previous value after their own charging scope is over. */ diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h index 3988762efe15..52b22c5c396d 100644 --- a/include/linux/sched/numa_balancing.h +++ b/include/linux/sched/numa_balancing.h @@ -15,13 +15,23 @@ #define TNF_FAULT_LOCAL 0x08 #define TNF_MIGRATE_FAIL 0x10 +enum numa_vmaskip_reason { + NUMAB_SKIP_UNSUITABLE, + NUMAB_SKIP_SHARED_RO, + NUMAB_SKIP_INACCESSIBLE, + NUMAB_SKIP_SCAN_DELAY, + NUMAB_SKIP_PID_INACTIVE, + NUMAB_SKIP_IGNORE_PID, + NUMAB_SKIP_SEQ_COMPLETED, +}; + #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); extern void set_numabalancing_state(bool enabled); extern void task_numa_free(struct task_struct *p, bool final); -extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, - int src_nid, int dst_cpu); +bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, + int src_nid, int dst_cpu); #else static inline void task_numa_fault(int last_node, int node, int pages, int flags) @@ -38,7 +48,7 @@ static inline void task_numa_free(struct task_struct *p, bool final) { } static inline bool should_numa_migrate_memory(struct task_struct *p, - struct page *page, int src_nid, int dst_cpu) + struct folio *folio, int src_nid, int dst_cpu) { return true; } diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index 994c25640e15..b2b9e6eb9683 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk) } #ifdef CONFIG_RT_MUTEXES +extern void rt_mutex_pre_schedule(void); +extern void rt_mutex_schedule(void); +extern void rt_mutex_post_schedule(void); + /* * Must hold either p->pi_lock or task_rq(p)->lock. */ diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h index fad77b5172e2..a8b28647aafc 100644 --- a/include/linux/sched/sd_flags.h +++ b/include/linux/sched/sd_flags.h @@ -110,6 +110,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS) /* + * Domain members share CPU cluster (LLC tags or L2 cache) + * + * NEEDS_GROUPS: Clusters are shared between groups. 
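The new flag pairs with cpus_share_resources(), declared in the sched/topology.h hunk below, so wakeup paths can prefer CPUs in the same cluster. A hedged sketch of the kind of check it enables (variable names are illustrative):

        /* Sketch: bias an idle-CPU search toward the waker's cluster. */
        if (cpus_share_resources(prev_cpu, this_cpu))
                target = prev_cpu;      /* same L2/cluster: cache-warm wakeup */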
+ */ +SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS) + +/* * Domain members share CPU package resources (i.e. caches) * * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 0014d3adaf84..4b7664c56208 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -9,6 +9,7 @@ #include <linux/sched/task.h> #include <linux/cred.h> #include <linux/refcount.h> +#include <linux/pid.h> #include <linux/posix-timers.h> #include <linux/mm_types.h> #include <asm/ptrace.h> @@ -303,20 +304,11 @@ static inline void kernel_signal_stop(void) schedule(); } -#ifdef __ia64__ -# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 -#else -# define ___ARCH_SI_IA64(_a1, _a2, _a3) -#endif -int force_sig_fault_to_task(int sig, int code, void __user *addr - ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) - , struct task_struct *t); -int force_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)); -int send_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) - , struct task_struct *t); +int force_sig_fault_to_task(int sig, int code, void __user *addr, + struct task_struct *t); +int force_sig_fault(int sig, int code, void __user *addr); +int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t); int force_sig_mceerr(int code, void __user *, short); int send_sig_mceerr(int code, void __user *, short, struct task_struct *); @@ -441,7 +433,6 @@ static inline bool fault_signal_pending(vm_fault_t fault_flags, * This is required every time the blocked sigset_t changes. * callers must hold sighand->siglock. */ -extern void recalc_sigpending_and_wake(struct task_struct *t); extern void recalc_sigpending(void); extern void calculate_sigpending(void); @@ -655,8 +646,12 @@ extern bool current_is_single_threaded(void); #define while_each_thread(g, t) \ while ((t = next_thread(t)) != g) +#define for_other_threads(p, t) \ + for (t = p; (t = next_thread(t)) != p; ) + #define __for_each_thread(signal, t) \ - list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) + list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \ + lockdep_is_held(&tasklist_lock)) #define for_each_thread(p, t) \ __for_each_thread((p)->signal, t) @@ -715,15 +710,26 @@ bool same_thread_group(struct task_struct *p1, struct task_struct *p2) return p1->signal == p2->signal; } -static inline struct task_struct *next_thread(const struct task_struct *p) +/* + * returns NULL if p is the last thread in the thread group + */ +static inline struct task_struct *__next_thread(struct task_struct *p) +{ + return list_next_or_null_rcu(&p->signal->thread_head, + &p->thread_node, + struct task_struct, + thread_node); +} + +static inline struct task_struct *next_thread(struct task_struct *p) { - return list_entry_rcu(p->thread_group.next, - struct task_struct, thread_group); + return __next_thread(p) ?: p->group_leader; } static inline int thread_group_empty(struct task_struct *p) { - return list_empty(&p->thread_group); + return thread_group_leader(p) && + list_is_last(&p->thread_node, &p->signal->thread_head); } #define delay_group_leader(p) \ diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h index 59d3736c454c..fb1e295e7e63 100644 --- a/include/linux/sched/smt.h +++ b/include/linux/sched/smt.h @@ -17,4 +17,4 @@ static inline bool sched_smt_active(void) { return false; } 
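An editorial aside on the sched/signal.h hunk above: next_thread() is now derived from the signal->thread_head list via __next_thread(), which returns NULL for the last thread and falls back to the group leader, and the new for_other_threads() macro visits every thread of a group except the starting one. A minimal sketch of a caller follows; the helper name is invented for illustration, and since the iteration is RCU-based the caller is assumed to hold rcu_read_lock() or tasklist_lock:

```c
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

/* Count the threads in p's thread group other than p itself. */
static int count_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int n = 0;

	rcu_read_lock();
	for_other_threads(p, t)	/* expands to: for (t = p; (t = next_thread(t)) != p; ) */
		n++;
	rcu_read_unlock();

	return n;
}
```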
void arch_smt_update(void); -#endif +#endif /* _LINUX_SCHED_SMT_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index a23af225c898..d362aacf9f89 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -7,6 +7,8 @@ * functionality: */ +#include <linux/rcupdate.h> +#include <linux/refcount.h> #include <linux/sched.h> #include <linux/uaccess.h> @@ -226,4 +228,6 @@ static inline void task_unlock(struct task_struct *p) spin_unlock(&p->alloc_lock); } +DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T)) + #endif /* _LINUX_SCHED_TASK_H */ diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index f158b025c175..ccd72b978e1f 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -8,6 +8,7 @@ #include <linux/sched.h> #include <linux/magic.h> +#include <linux/refcount.h> #ifdef CONFIG_THREAD_INFO_IN_TASK diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 67b573d5bf28..a6e04b4a21d7 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void) #ifdef CONFIG_SCHED_CLUSTER static inline int cpu_cluster_flags(void) { - return SD_SHARE_PKG_RESOURCES; + return SD_CLUSTER | SD_SHARE_PKG_RESOURCES; } #endif @@ -109,8 +109,6 @@ struct sched_domain { u64 max_newidle_lb_cost; unsigned long last_decay_max_lb_cost; - u64 avg_scan_cost; /* select_idle_sibling */ - #ifdef CONFIG_SCHEDSTATS /* load_balance() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; @@ -179,6 +177,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms); void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); bool cpus_share_cache(int this_cpu, int that_cpu); +bool cpus_share_resources(int this_cpu, int that_cpu); typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); typedef int (*sched_domain_flags_f)(void); @@ -232,6 +231,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) return true; } +static inline bool cpus_share_resources(int this_cpu, int that_cpu) +{ + return true; +} + #endif /* !CONFIG_SMP */ #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) @@ -275,6 +279,14 @@ void arch_update_thermal_pressure(const struct cpumask *cpus, { } #endif +#ifndef arch_scale_freq_ref +static __always_inline +unsigned int arch_scale_freq_ref(int cpu) +{ + return 0; +} +#endif + static inline int task_node(const struct task_struct *p) { return cpu_to_node(task_cpu(p)); diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h index 3c3e049224ae..969aaf5ef9d6 100644 --- a/include/linux/sched/types.h +++ b/include/linux/sched/types.h @@ -20,4 +20,4 @@ struct task_cputime { unsigned long long sum_exec_runtime; }; -#endif +#endif /* _LINUX_SCHED_TYPES_H */ diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h index 837a23624a66..bc60243d43b3 100644 --- a/include/linux/sched/vhost_task.h +++ b/include/linux/sched/vhost_task.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_VHOST_TASK_H -#define _LINUX_VHOST_TASK_H - +#ifndef _LINUX_SCHED_VHOST_TASK_H +#define _LINUX_SCHED_VHOST_TASK_H struct vhost_task; @@ -11,4 +10,4 @@ void vhost_task_start(struct vhost_task *vtsk); void vhost_task_stop(struct vhost_task *vtsk); void vhost_task_wake(struct vhost_task *vtsk); -#endif +#endif /* _LINUX_SCHED_VHOST_TASK_H */ diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 
e6fe4f73ffe6..f2f05fb42d28 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -58,6 +58,8 @@ struct scmi_clock_info { u64 step_size; } range; }; + int num_parents; + u32 *parents; }; enum scmi_power_scale { @@ -80,6 +82,11 @@ struct scmi_protocol_handle; * @rate_set: set the clock rate of a clock * @enable: enables the specified clock * @disable: disables the specified clock + * @state_get: get the status of the specified clock + * @config_oem_get: get the value of an OEM specific clock config + * @config_oem_set: set the value of an OEM specific clock config + * @parent_get: get the parent id of a clk + * @parent_set: set the parent of a clock */ struct scmi_clk_proto_ops { int (*count_get)(const struct scmi_protocol_handle *ph); @@ -90,22 +97,36 @@ struct scmi_clk_proto_ops { u64 *rate); int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u64 rate); - int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id); - int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id); - int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id); - int (*disable_atomic)(const struct scmi_protocol_handle *ph, - u32 clk_id); + int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id, + bool atomic); + int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id, + bool atomic); + int (*state_get)(const struct scmi_protocol_handle *ph, u32 clk_id, + bool *enabled, bool atomic); + int (*config_oem_get)(const struct scmi_protocol_handle *ph, u32 clk_id, + u8 oem_type, u32 *oem_val, u32 *attributes, + bool atomic); + int (*config_oem_set)(const struct scmi_protocol_handle *ph, u32 clk_id, + u8 oem_type, u32 oem_val, bool atomic); + int (*parent_get)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 *parent_id); + int (*parent_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 parent_id); +}; + +struct scmi_perf_domain_info { + char name[SCMI_MAX_STR_SIZE]; + bool set_perf; }; /** * struct scmi_perf_proto_ops - represents the various operations provided * by SCMI Performance Protocol * + * @num_domains_get: gets the number of supported performance domains + * @info_get: get the information of a performance domain * @limits_set: sets limits on the performance level of a domain * @limits_get: gets limits on the performance level of a domain * @level_set: sets the performance level of a domain * @level_get: gets the performance level of a domain - * @device_domain_id: gets the scmi domain id for a given device * @transition_latency_get: gets the DVFS transition latency for a given device * @device_opps_add: adds all the OPPs for a given device * @freq_set: sets the frequency for a given device using sustained frequency @@ -120,6 +141,9 @@ struct scmi_clk_proto_ops { * or in some other (abstract) scale */ struct scmi_perf_proto_ops { + int (*num_domains_get)(const struct scmi_protocol_handle *ph); + const struct scmi_perf_domain_info __must_check *(*info_get) + (const struct scmi_protocol_handle *ph, u32 domain); int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain, u32 max_perf, u32 min_perf); int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain, @@ -128,11 +152,10 @@ struct scmi_perf_proto_ops { u32 level, bool poll); int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain, u32 *level, bool poll); - int (*device_domain_id)(struct device *dev); int (*transition_latency_get)(const struct scmi_protocol_handle *ph, - struct device *dev); + u32 domain); int 
(*device_opps_add)(const struct scmi_protocol_handle *ph, - struct device *dev); + struct device *dev, u32 domain); int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain, unsigned long rate, bool poll); int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain, @@ -140,7 +163,7 @@ struct scmi_perf_proto_ops { int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain, unsigned long *rate, unsigned long *power); bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph, - struct device *dev); + u32 domain); enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph); }; diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 175079552f68..709ad84809e1 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -3,6 +3,7 @@ #define _LINUX_SECCOMP_H #include <uapi/linux/seccomp.h> +#include <linux/seccomp_types.h> #define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ SECCOMP_FILTER_FLAG_LOG | \ @@ -21,25 +22,6 @@ #include <linux/atomic.h> #include <asm/seccomp.h> -struct seccomp_filter; -/** - * struct seccomp - the state of a seccomp'ed process - * - * @mode: indicates one of the valid values above for controlled - * system calls available to a process. - * @filter_count: number of seccomp filters - * @filter: must always point to a valid seccomp-filter or NULL as it is - * accessed without locking during system call entry. - * - * @filter must only be accessed from the context of current as there - * is no read locking. - */ -struct seccomp { - int mode; - atomic_t filter_count; - struct seccomp_filter *filter; -}; - #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER extern int __secure_computing(const struct seccomp_data *sd); static inline int secure_computing(void) @@ -64,8 +46,6 @@ static inline int seccomp_mode(struct seccomp *s) #include <linux/errno.h> -struct seccomp { }; -struct seccomp_filter { }; struct seccomp_data; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER @@ -126,6 +106,8 @@ static inline long seccomp_get_metadata(struct task_struct *task, #ifdef CONFIG_SECCOMP_CACHE_DEBUG struct seq_file; +struct pid_namespace; +struct pid; int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task); diff --git a/include/linux/seccomp_types.h b/include/linux/seccomp_types.h new file mode 100644 index 000000000000..cf0a0355024f --- /dev/null +++ b/include/linux/seccomp_types.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SECCOMP_TYPES_H +#define _LINUX_SECCOMP_TYPES_H + +#include <linux/types.h> + +#ifdef CONFIG_SECCOMP + +struct seccomp_filter; +/** + * struct seccomp - the state of a seccomp'ed process + * + * @mode: indicates one of the valid values above for controlled + * system calls available to a process. + * @filter_count: number of seccomp filters + * @filter: must always point to a valid seccomp-filter or NULL as it is + * accessed without locking during system call entry. + * + * @filter must only be accessed from the context of current as there + * is no read locking. 
+ */ +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +#else + +struct seccomp { }; +struct seccomp_filter { }; + +#endif + +#endif /* _LINUX_SECCOMP_TYPES_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 5f16eecde00b..d0eb20f90b26 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -32,6 +32,7 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/sockptr.h> +#include <uapi/linux/lsm.h> struct linux_binprm; struct cred; @@ -60,6 +61,7 @@ struct fs_parameter; enum fs_value_type; struct watch; struct watch_notification; +struct lsm_ctx; /* Default (no) options for the capable function */ #define CAP_OPT_NONE 0x0 @@ -138,6 +140,8 @@ enum lockdown_reason { }; extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1]; +extern u32 lsm_active_cnt; +extern const struct lsm_id *lsm_idlist[]; /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, @@ -151,7 +155,7 @@ extern int cap_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); +extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file); int cap_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int cap_inode_removexattr(struct mnt_idmap *idmap, @@ -261,6 +265,7 @@ int unregister_blocking_lsm_notifier(struct notifier_block *nb); /* prototypes */ extern int security_init(void); extern int early_security_init(void); +extern u64 lsm_name_to_attr(const char *name); /* Security operations */ int security_binder_set_context_mgr(const struct cred *mgr); @@ -284,16 +289,16 @@ int security_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts); -int security_quotactl(int cmds, int type, int id, struct super_block *sb); +int security_quotactl(int cmds, int type, int id, const struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); int security_settime64(const struct timespec64 *ts, const struct timezone *tz); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_bprm_creds_for_exec(struct linux_binprm *bprm); -int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); +int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file); int security_bprm_check(struct linux_binprm *bprm); -void security_bprm_committing_creds(struct linux_binprm *bprm); -void security_bprm_committed_creds(struct linux_binprm *bprm); +void security_bprm_committing_creds(const struct linux_binprm *bprm); +void security_bprm_committed_creds(const struct linux_binprm *bprm); int security_fs_context_submount(struct fs_context *fc, struct super_block *reference); int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); @@ -304,7 +309,7 @@ void security_free_mnt_opts(void **mnt_opts); int security_sb_eat_lsm_opts(char *options, void **mnt_opts); int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts); int security_sb_remount(struct super_block *sb, void *mnt_opts); -int security_sb_kern_mount(struct super_block *sb); +int security_sb_kern_mount(const struct super_block *sb); 
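Worth pausing on the pattern in the seccomp hunks above, which recurs below for sem_types.h and seqlock_types.h: the struct definitions move into a minimal *_types.h header so that headers which merely embed the struct (linux/sched.h embeds struct seccomp in task_struct) no longer pull in the full API header. A rough sketch of the shape of such a split, using hypothetical "foo" names rather than any real kernel header:

```c
/* foo_types.h: hypothetical minimal header; embedders include only this. */
#ifndef _LINUX_FOO_TYPES_H
#define _LINUX_FOO_TYPES_H

struct foo_state {
#ifdef CONFIG_FOO
	int mode;	/* real members exist only when the feature is on */
#endif
};		/* empty struct otherwise, mirroring struct seccomp { }; */

#endif /* _LINUX_FOO_TYPES_H */

/* foo.h: the full API header layers on top of the types header. */
#include <linux/foo_types.h>

#ifdef CONFIG_FOO
void foo_init(struct foo_state *s);
#else
static inline void foo_init(struct foo_state *s) { }
#endif
```

The payoff is fewer include cycles and smaller rebuilds: code that only needs the layout depends on the types header, while only real users of the API see the function declarations.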
int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(const char *dev_name, const struct path *path, @@ -389,6 +394,8 @@ int security_file_permission(struct file *file, int mask); int security_file_alloc(struct file *file); void security_file_free(struct file *file); int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +int security_file_ioctl_compat(struct file *file, unsigned int cmd, + unsigned long arg); int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags); int security_mmap_addr(unsigned long addr); @@ -470,10 +477,13 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd); int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter); void security_d_instantiate(struct dentry *dentry, struct inode *inode); -int security_getprocattr(struct task_struct *p, const char *lsm, const char *name, +int security_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx, + size_t __user *size, u32 flags); +int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx, + size_t size, u32 flags); +int security_getprocattr(struct task_struct *p, int lsmid, const char *name, char **value); -int security_setprocattr(const char *lsm, const char *name, void *value, - size_t size); +int security_setprocattr(int lsmid, const char *name, void *value, size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); @@ -484,6 +494,8 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); int security_locked_down(enum lockdown_reason what); +int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, size_t *uctx_len, + void *val, size_t val_len, u64 id, u64 flags); #else /* CONFIG_SECURITY */ static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) @@ -501,6 +513,11 @@ static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb) return 0; } +static inline u64 lsm_name_to_attr(const char *name) +{ + return LSM_ATTR_UNDEF; +} + static inline void security_free_mnt_opts(void **mnt_opts) { } @@ -581,7 +598,7 @@ static inline int security_capable(const struct cred *cred, } static inline int security_quotactl(int cmds, int type, int id, - struct super_block *sb) + const struct super_block *sb) { return 0; } @@ -613,7 +630,7 @@ static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm) } static inline int security_bprm_creds_from_file(struct linux_binprm *bprm, - struct file *file) + const struct file *file) { return cap_bprm_creds_from_file(bprm, file); } @@ -623,11 +640,11 @@ static inline int security_bprm_check(struct linux_binprm *bprm) return 0; } -static inline void security_bprm_committing_creds(struct linux_binprm *bprm) +static inline void security_bprm_committing_creds(const struct linux_binprm *bprm) { } -static inline void security_bprm_committed_creds(struct linux_binprm *bprm) +static inline void security_bprm_committed_creds(const struct linux_binprm *bprm) { } @@ -987,6 +1004,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd, return 0; } +static inline int security_file_ioctl_compat(struct file *file, + unsigned int cmd, + 
unsigned long arg) +{ + return 0; +} + static inline int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { @@ -1337,14 +1361,28 @@ static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode) { } -static inline int security_getprocattr(struct task_struct *p, const char *lsm, +static inline int security_getselfattr(unsigned int attr, + struct lsm_ctx __user *ctx, + size_t __user *size, u32 flags) +{ + return -EOPNOTSUPP; +} + +static inline int security_setselfattr(unsigned int attr, + struct lsm_ctx __user *ctx, + size_t size, u32 flags) +{ + return -EOPNOTSUPP; +} + +static inline int security_getprocattr(struct task_struct *p, int lsmid, const char *name, char **value) { return -EINVAL; } -static inline int security_setprocattr(const char *lsm, char *name, - void *value, size_t size) +static inline int security_setprocattr(int lsmid, char *name, void *value, + size_t size) { return -EINVAL; } @@ -1395,6 +1433,12 @@ static inline int security_locked_down(enum lockdown_reason what) { return 0; } +static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, + size_t *uctx_len, void *val, size_t val_len, + u64 id, u64 flags) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_SECURITY */ #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) diff --git a/include/linux/sed-opal-key.h b/include/linux/sed-opal-key.h new file mode 100644 index 000000000000..0ca03054e8f6 --- /dev/null +++ b/include/linux/sed-opal-key.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * SED key operations. + * + * Copyright (C) 2023 IBM Corporation + * + * These are the accessor functions (read/write) for SED Opal + * keys. Specific keystores can provide overrides. + * + */ + +#include <linux/kernel.h> + +#ifdef CONFIG_PSERIES_PLPKS_SED +int sed_read_key(char *keyname, char *key, u_int *keylen); +int sed_write_key(char *keyname, char *key, u_int keylen); +#else +static inline +int sed_read_key(char *keyname, char *key, u_int *keylen) { + return -EOPNOTSUPP; +} +static inline +int sed_write_key(char *keyname, char *key, u_int keylen) { + return -EOPNOTSUPP; +} +#endif diff --git a/include/linux/sem.h b/include/linux/sem.h index 5608a500c43e..c4deefe42aeb 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -3,25 +3,17 @@ #define _LINUX_SEM_H #include <uapi/linux/sem.h> +#include <linux/sem_types.h> struct task_struct; -struct sem_undo_list; #ifdef CONFIG_SYSVIPC -struct sysv_sem { - struct sem_undo_list *undo_list; -}; - extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); extern void exit_sem(struct task_struct *tsk); #else -struct sysv_sem { - /* empty */ -}; - static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) { return 0; diff --git a/include/linux/sem_types.h b/include/linux/sem_types.h new file mode 100644 index 000000000000..73df1971a7ae --- /dev/null +++ b/include/linux/sem_types.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SEM_TYPES_H +#define _LINUX_SEM_TYPES_H + +struct sem_undo_list; + +struct sysv_sem { +#ifdef CONFIG_SYSVIPC + struct sem_undo_list *undo_list; +#endif +}; + +#endif /* _LINUX_SEM_TYPES_H */ diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index 515d7fcb9634..c44f4b47b945 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -14,19 +14,24 @@ * @buffer: pointer to the buffer * @size: size of the buffer * @len: the amount of data inside the buffer - * @readpos: The next position to read in the 
buffer. */ struct seq_buf { char *buffer; size_t size; size_t len; - loff_t readpos; }; +#define DECLARE_SEQ_BUF(NAME, SIZE) \ + struct seq_buf NAME = { \ + .buffer = (char[SIZE]) { 0 }, \ + .size = SIZE, \ + } + static inline void seq_buf_clear(struct seq_buf *s) { s->len = 0; - s->readpos = 0; + if (s->size) + s->buffer[0] = '\0'; } static inline void @@ -39,7 +44,7 @@ seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) /* * seq_buf have a buffer that might overflow. When this happens - * the len and size are set to be equal. + * len is set to be greater than size. */ static inline bool seq_buf_has_overflowed(struct seq_buf *s) @@ -72,8 +77,8 @@ static inline unsigned int seq_buf_used(struct seq_buf *s) } /** - * seq_buf_terminate - Make sure buffer is nul terminated - * @s: the seq_buf descriptor to terminate. + * seq_buf_str - get %NUL-terminated C string from seq_buf + * @s: the seq_buf handle * * This makes sure that the buffer in @s is nul terminated and * safe to read as a string. @@ -84,16 +89,20 @@ static inline unsigned int seq_buf_used(struct seq_buf *s) * * After this function is called, s->buffer is safe to use * in string operations. + * + * Returns @s->buffer after making sure it is terminated. */ -static inline void seq_buf_terminate(struct seq_buf *s) +static inline const char *seq_buf_str(struct seq_buf *s) { if (WARN_ON(s->size == 0)) - return; + return ""; if (seq_buf_buffer_left(s)) s->buffer[s->len] = 0; else s->buffer[s->size - 1] = 0; + + return s->buffer; } /** @@ -143,7 +152,7 @@ extern __printf(2, 0) int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, - int cnt); + size_t start, int cnt); extern int seq_buf_puts(struct seq_buf *s, const char *str); extern int seq_buf_putc(struct seq_buf *s, unsigned char c); extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 386ab580b839..234bcdb1fba4 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -207,6 +207,21 @@ static const struct file_operations __name ## _fops = { \ .release = single_release, \ } +#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .write = __name ## _write, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + #define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 987a59d977c5..d90d8ee29d81 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -18,6 +18,7 @@ #include <linux/lockdep.h> #include <linux/mutex.h> #include <linux/preempt.h> +#include <linux/seqlock_types.h> #include <linux/spinlock.h> #include <asm/processor.h> @@ -37,37 +38,6 @@ */ #define KCSAN_SEQLOCK_REGION_MAX 1000 -/* - * Sequence counters (seqcount_t) - * - * This is the raw counting mechanism, without any writer protection. - * - * Write side critical sections must be serialized and non-preemptible.
- * - * If readers can be invoked from hardirq or softirq contexts, - * interrupts or bottom halves must also be respectively disabled before - * entering the write section. - * - * This mechanism can't be used if the protected data contains pointers, - * as the writer can invalidate a pointer that a reader is following. - * - * If the write serialization mechanism is one of the common kernel - * locking primitives, use a sequence counter with associated lock - * (seqcount_LOCKNAME_t) instead. - * - * If it's desired to automatically handle the sequence counter writer - * serialization and non-preemptibility requirements, use a sequential - * lock (seqlock_t) instead. - * - * See Documentation/locking/seqlock.rst - */ -typedef struct seqcount { - unsigned sequence; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} seqcount_t; - static inline void __seqcount_init(seqcount_t *s, const char *name, struct lock_class_key *key) { @@ -132,28 +102,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) */ /* - * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot - * disable preemption. It can lead to higher latencies, and the write side - * sections will not be able to acquire locks which become sleeping locks - * (e.g. spinlock_t). - * - * To remain preemptible while avoiding a possible livelock caused by the - * reader preempting the writer, use a different technique: let the reader - * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the - * case, acquire then release the associated LOCKNAME writer serialization - * lock. This will allow any possibly-preempted writer to make progress - * until the end of its writer serialization lock critical section. - * - * This lock-unlock technique must be implemented for all of PREEMPT_RT - * sleeping locks. 
See Documentation/locking/locktypes.rst - */ -#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT) -#define __SEQ_LOCK(expr) expr -#else -#define __SEQ_LOCK(expr) -#endif - -/* * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated * @seqcount: The real sequence counter * @lock: Pointer to the associated lock @@ -191,22 +139,21 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t * @locktype: LOCKNAME canonical C data type * @preemptible: preemptibility of above locktype - * @lockmember: argument for lockdep_assert_held() - * @lockbase: associated lock release function (prefix only) - * @lock_acquire: associated lock acquisition function (full call) + * @lockbase: prefix for associated lock/unlock */ -#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \ -typedef struct seqcount_##lockname { \ - seqcount_t seqcount; \ - __SEQ_LOCK(locktype *lock); \ -} seqcount_##lockname##_t; \ - \ +#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \ static __always_inline seqcount_t * \ __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \ { \ return &s->seqcount; \ } \ \ +static __always_inline const seqcount_t * \ +__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \ +{ \ + return &s->seqcount; \ +} \ + \ static __always_inline unsigned \ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ { \ @@ -216,7 +163,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ return seq; \ \ if (preemptible && unlikely(seq & 1)) { \ - __SEQ_LOCK(lock_acquire); \ + __SEQ_LOCK(lockbase##_lock(s->lock)); \ __SEQ_LOCK(lockbase##_unlock(s->lock)); \ \ /* \ @@ -242,7 +189,7 @@ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \ static __always_inline void \ __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \ { \ - __SEQ_LOCK(lockdep_assert_held(lockmember)); \ + __SEQ_LOCK(lockdep_assert_held(s->lock)); \ } /* @@ -254,6 +201,11 @@ static inline seqcount_t *__seqprop_ptr(seqcount_t *s) return s; } +static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s) +{ + return s; +} + static inline unsigned __seqprop_sequence(const seqcount_t *s) { return READ_ONCE(s->sequence); @@ -271,10 +223,11 @@ static inline void __seqprop_assert(const seqcount_t *s) #define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT) -SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock)) -SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock)) -SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock)) -SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock)) +SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin) +SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin) +SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read) +SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) +#undef SEQCOUNT_LOCKNAME /* * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t @@ -294,19 +247,20 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) #define __seqprop_case(s, lockname, prop) \ - seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s)) + seqcount_##lockname##_t: __seqprop_##lockname##_##prop #define __seqprop(s, prop) _Generic(*(s), \ - seqcount_t: __seqprop_##prop((void *)(s)), \ 
+ seqcount_t: __seqprop_##prop, \ __seqprop_case((s), raw_spinlock, prop), \ __seqprop_case((s), spinlock, prop), \ __seqprop_case((s), rwlock, prop), \ __seqprop_case((s), mutex, prop)) -#define seqprop_ptr(s) __seqprop(s, ptr) -#define seqprop_sequence(s) __seqprop(s, sequence) -#define seqprop_preemptible(s) __seqprop(s, preemptible) -#define seqprop_assert(s) __seqprop(s, assert) +#define seqprop_ptr(s) __seqprop(s, ptr)(s) +#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s) +#define seqprop_sequence(s) __seqprop(s, sequence)(s) +#define seqprop_preemptible(s) __seqprop(s, preemptible)(s) +#define seqprop_assert(s) __seqprop(s, assert)(s) /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier @@ -355,7 +309,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex */ #define read_seqcount_begin(s) \ ({ \ - seqcount_lockdep_reader_access(seqprop_ptr(s)); \ + seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \ raw_read_seqcount_begin(s); \ }) @@ -421,7 +375,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex * Return: true if a read section retry is required, else false */ #define __read_seqcount_retry(s, start) \ - do___read_seqcount_retry(seqprop_ptr(s), start) + do___read_seqcount_retry(seqprop_const_ptr(s), start) static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start) { @@ -441,7 +395,7 @@ static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start) * Return: true if a read section retry is required, else false */ #define read_seqcount_retry(s, start) \ - do_read_seqcount_retry(seqprop_ptr(s), start) + do_read_seqcount_retry(seqprop_const_ptr(s), start) static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start) { @@ -512,8 +466,8 @@ do { \ static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass) { - do_raw_write_seqcount_begin(s); seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); + do_raw_write_seqcount_begin(s); } /** @@ -574,7 +528,7 @@ static inline void do_write_seqcount_end(seqcount_t *s) * via WRITE_ONCE): a) to ensure the writes become visible to other threads * atomically, avoiding compiler optimizations; b) to document which writes are * meant to propagate to the reader critical section. This is necessary because - * neither writes before and after the barrier are enclosed in a seq-writer + * neither writes before nor after the barrier are enclosed in a seq-writer * critical section that would ensure readers are aware of ongoing writes:: * * seqcount_t seq; @@ -784,25 +738,6 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s) smp_wmb(); /* increment "sequence" before following stores */ } -/* - * Sequential locks (seqlock_t) - * - * Sequence counters with an embedded spinlock for writer serialization - * and non-preemptibility. - * - * For more info, see: - * - Comments on top of seqcount_t - * - Documentation/locking/seqlock.rst - */ -typedef struct { - /* - * Make sure that readers don't starve writers on PREEMPT_RT: use - * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK(). 
- */ - seqcount_spinlock_t seqcount; - spinlock_t lock; -} seqlock_t; - #define __SEQLOCK_UNLOCKED(lockname) \ { \ .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \ @@ -864,7 +799,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) } /* - * For all seqlock_t write side functions, use the the internal + * For all seqlock_t write side functions, use the internal * do_write_seqcount_begin() instead of generic write_seqcount_begin(). * This way, no redundant lockdep_assert_held() checks are added. */ diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h new file mode 100644 index 000000000000..dfdf43e3fa3d --- /dev/null +++ b/include/linux/seqlock_types.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SEQLOCK_TYPES_H +#define __LINUX_SEQLOCK_TYPES_H + +#include <linux/lockdep_types.h> +#include <linux/mutex_types.h> +#include <linux/spinlock_types.h> + +/* + * Sequence counters (seqcount_t) + * + * This is the raw counting mechanism, without any writer protection. + * + * Write side critical sections must be serialized and non-preemptible. + * + * If readers can be invoked from hardirq or softirq contexts, + * interrupts or bottom halves must also be respectively disabled before + * entering the write section. + * + * This mechanism can't be used if the protected data contains pointers, + * as the writer can invalidate a pointer that a reader is following. + * + * If the write serialization mechanism is one of the common kernel + * locking primitives, use a sequence counter with associated lock + * (seqcount_LOCKNAME_t) instead. + * + * If it's desired to automatically handle the sequence counter writer + * serialization and non-preemptibility requirements, use a sequential + * lock (seqlock_t) instead. + * + * See Documentation/locking/seqlock.rst + */ +typedef struct seqcount { + unsigned sequence; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} seqcount_t; + +/* + * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot + * disable preemption. It can lead to higher latencies, and the write side + * sections will not be able to acquire locks which become sleeping locks + * (e.g. spinlock_t). + * + * To remain preemptible while avoiding a possible livelock caused by the + * reader preempting the writer, use a different technique: let the reader + * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the + * case, acquire then release the associated LOCKNAME writer serialization + * lock. This will allow any possibly-preempted writer to make progress + * until the end of its writer serialization lock critical section. + * + * This lock-unlock technique must be implemented for all of PREEMPT_RT + * sleeping locks. 
See Documentation/locking/locktypes.rst + */ +#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT) +#define __SEQ_LOCK(expr) expr +#else +#define __SEQ_LOCK(expr) +#endif + +#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \ +typedef struct seqcount_##lockname { \ + seqcount_t seqcount; \ + __SEQ_LOCK(locktype *lock); \ +} seqcount_##lockname##_t; + +SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin) +SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin) +SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read) +SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) +#undef SEQCOUNT_LOCKNAME + +/* + * Sequential locks (seqlock_t) + * + * Sequence counters with an embedded spinlock for writer serialization + * and non-preemptibility. + * + * For more info, see: + * - Comments on top of seqcount_t + * - Documentation/locking/seqlock.rst + */ +typedef struct { + /* + * Make sure that readers don't starve writers on PREEMPT_RT: use + * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK(). + */ + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +#endif /* __LINUX_SEQLOCK_TYPES_H */ diff --git a/include/linux/serdev.h b/include/linux/serdev.h index f5f97fa25e8a..3fab88ba265e 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -27,7 +27,7 @@ struct serdev_device; * not sleep. */ struct serdev_device_ops { - int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); + ssize_t (*receive_buf)(struct serdev_device *, const u8 *, size_t); void (*write_wakeup)(struct serdev_device *); }; @@ -82,7 +82,7 @@ enum serdev_parity { * serdev controller structures */ struct serdev_controller_ops { - int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t); + ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t); void (*write_flush)(struct serdev_controller *); int (*write_room)(struct serdev_controller *); int (*open)(struct serdev_controller *); @@ -99,12 +99,14 @@ struct serdev_controller_ops { /** * struct serdev_controller - interface to the serdev controller * @dev: Driver model representation of the device. + * @host: Serial port hardware controller device * @nr: number identifier for this controller/bus. * @serdev: Pointer to slave device for this controller. * @ops: Controller operations. 
*/ struct serdev_controller { struct device dev; + struct device *host; unsigned int nr; struct serdev_device *serdev; const struct serdev_controller_ops *ops; @@ -167,7 +169,9 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *); int serdev_device_add(struct serdev_device *); void serdev_device_remove(struct serdev_device *); -struct serdev_controller *serdev_controller_alloc(struct device *, size_t); +struct serdev_controller *serdev_controller_alloc(struct device *host, + struct device *parent, + size_t size); int serdev_controller_add(struct serdev_controller *); void serdev_controller_remove(struct serdev_controller *); @@ -181,9 +185,9 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl serdev->ops->write_wakeup(serdev); } -static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl, - const unsigned char *data, - size_t count) +static inline ssize_t serdev_controller_receive_buf(struct serdev_controller *ctrl, + const u8 *data, + size_t count) { struct serdev_device *serdev = ctrl->serdev; @@ -200,13 +204,13 @@ void serdev_device_close(struct serdev_device *); int devm_serdev_device_open(struct device *, struct serdev_device *); unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); void serdev_device_set_flow_control(struct serdev_device *, bool); -int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); +int serdev_device_write_buf(struct serdev_device *, const u8 *, size_t); void serdev_device_wait_until_sent(struct serdev_device *, long); int serdev_device_get_tiocm(struct serdev_device *); int serdev_device_set_tiocm(struct serdev_device *, int, int); int serdev_device_break_ctl(struct serdev_device *serdev, int break_state); void serdev_device_write_wakeup(struct serdev_device *); -int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long); +ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long); void serdev_device_write_flush(struct serdev_device *); int serdev_device_write_room(struct serdev_device *); @@ -244,7 +248,7 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev } static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} static inline int serdev_device_write_buf(struct serdev_device *serdev, - const unsigned char *buf, + const u8 *buf, size_t count) { return -ENODEV; @@ -262,8 +266,9 @@ static inline int serdev_device_break_ctl(struct serdev_device *serdev, int brea { return -EOPNOTSUPP; } -static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf, - size_t count, unsigned long timeout) +static inline ssize_t serdev_device_write(struct serdev_device *sdev, + const u8 *buf, size_t count, + unsigned long timeout) { return -ENODEV; } @@ -311,11 +316,13 @@ struct tty_driver; #ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT struct device *serdev_tty_port_register(struct tty_port *port, + struct device *host, struct device *parent, struct tty_driver *drv, int idx); int serdev_tty_port_unregister(struct tty_port *port); #else static inline struct device *serdev_tty_port_register(struct tty_port *port, + struct device *host, struct device *parent, struct tty_driver *drv, int idx) { diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index bb6f073bc159..536b2581d3e2 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -588,6 +588,85 @@ struct uart_port { void *private_data; /* 
generic platform data pointer */ }; +/** + * uart_port_lock - Lock the UART port + * @up: Pointer to UART port structure + */ +static inline void uart_port_lock(struct uart_port *up) +{ + spin_lock(&up->lock); +} + +/** + * uart_port_lock_irq - Lock the UART port and disable interrupts + * @up: Pointer to UART port structure + */ +static inline void uart_port_lock_irq(struct uart_port *up) +{ + spin_lock_irq(&up->lock); +} + +/** + * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts + * @up: Pointer to UART port structure + * @flags: Pointer to interrupt flags storage + */ +static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags) +{ + spin_lock_irqsave(&up->lock, *flags); +} + +/** + * uart_port_trylock - Try to lock the UART port + * @up: Pointer to UART port structure + * + * Returns: True if lock was acquired, false otherwise + */ +static inline bool uart_port_trylock(struct uart_port *up) +{ + return spin_trylock(&up->lock); +} + +/** + * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts + * @up: Pointer to UART port structure + * @flags: Pointer to interrupt flags storage + * + * Returns: True if lock was acquired, false otherwise + */ +static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags) +{ + return spin_trylock_irqsave(&up->lock, *flags); +} + +/** + * uart_port_unlock - Unlock the UART port + * @up: Pointer to UART port structure + */ +static inline void uart_port_unlock(struct uart_port *up) +{ + spin_unlock(&up->lock); +} + +/** + * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts + * @up: Pointer to UART port structure + */ +static inline void uart_port_unlock_irq(struct uart_port *up) +{ + spin_unlock_irq(&up->lock); +} + +/** + * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts + * @up: Pointer to UART port structure + * @flags: The saved interrupt flags for restore + */ +static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags) +{ + spin_unlock_irqrestore(&up->lock, flags); +} + static inline int serial_port_in(struct uart_port *up, int offset) { return up->serial_in(up, offset); @@ -773,9 +852,9 @@ static inline unsigned long uart_fifo_timeout(struct uart_port *port) } /* Base timer interval for polling */ -static inline int uart_poll_timeout(struct uart_port *port) +static inline unsigned long uart_poll_timeout(struct uart_port *port) { - int timeout = uart_fifo_timeout(port); + unsigned long timeout = uart_fifo_timeout(port); return timeout > 6 ? 
(timeout / 2 - 2) : 1; } @@ -956,14 +1035,14 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) u8 sysrq_ch; if (!port->has_sysrq) { - spin_unlock(&port->lock); + uart_port_unlock(port); return; } sysrq_ch = port->sysrq_ch; port->sysrq_ch = 0; - spin_unlock(&port->lock); + uart_port_unlock(port); if (sysrq_ch) handle_sysrq(sysrq_ch); @@ -975,14 +1054,14 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port u8 sysrq_ch; if (!port->has_sysrq) { - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); return; } sysrq_ch = port->sysrq_ch; port->sysrq_ch = 0; - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); if (sysrq_ch) handle_sysrq(sysrq_ch); @@ -998,12 +1077,12 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch) } static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { - spin_unlock(&port->lock); + uart_port_unlock(port); } static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, unsigned long flags) { - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); } #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ diff --git a/include/linux/shm.h b/include/linux/shm.h index d8e69aed3d32..c55bef0538e5 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h @@ -2,12 +2,12 @@ #ifndef _LINUX_SHM_H_ #define _LINUX_SHM_H_ -#include <linux/list.h> +#include <linux/types.h> #include <asm/page.h> -#include <uapi/linux/shm.h> #include <asm/shmparam.h> struct file; +struct task_struct; #ifdef CONFIG_SYSVIPC struct sysv_shm { diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 6b0c626620f5..2caa6b86106a 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -23,18 +23,22 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ - pgoff_t fallocend; /* highest fallocate endindex */ - struct list_head shrinklist; /* shrinkable hpage inodes */ - struct list_head swaplist; /* chain of maybes on swap */ + union { + struct offset_ctx dir_offsets; /* stable directory offsets */ + struct { + struct list_head shrinklist; /* shrinkable hpage inodes */ + struct list_head swaplist; /* chain of maybes on swap */ + }; + }; + struct timespec64 i_crtime; /* file creation time */ struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ + pgoff_t fallocend; /* highest fallocate endindex */ + unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */ atomic_t stop_eviction; /* hold when working on inode */ - struct timespec64 i_crtime; /* file creation time */ - unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */ #ifdef CONFIG_TMPFS_QUOTA struct dquot *i_dquot[MAXQUOTAS]; #endif - struct offset_ctx dir_offsets; /* stable entry offsets */ struct inode vfs_inode; }; diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 224293b2dd06..1a00be90d93a 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -4,6 +4,25 @@ #include <linux/atomic.h> #include <linux/types.h> +#include <linux/refcount.h> +#include <linux/completion.h> + +#define SHRINKER_UNIT_BITS BITS_PER_LONG + +/* + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware + * shrinkers, which have elements charged to the memcg. 
+ */ +struct shrinker_info_unit { + atomic_long_t nr_deferred[SHRINKER_UNIT_BITS]; + DECLARE_BITMAP(map, SHRINKER_UNIT_BITS); +}; + +struct shrinker_info { + struct rcu_head rcu; + int map_nr_max; + struct shrinker_info_unit *unit[]; +}; /* * This struct is used to pass information from page reclaim to the shrinkers. @@ -70,6 +89,19 @@ struct shrinker { int seeks; /* seeks to recreate an obj */ unsigned flags; + /* + * The reference count of this shrinker. A registered shrinker has an + * initial refcount of 1; lookup operations may then use it via + * shrinker_try_get(). During unregistration the initial refcount is + * dropped, and the shrinker is freed asynchronously via RCU once its + * refcount reaches 0. + */ + refcount_t refcount; + struct completion done; /* used to wait for refcount to reach 0 */ + struct rcu_head rcu; + + void *private_data; + /* These are for internal use */ struct list_head list; #ifdef CONFIG_MEMCG @@ -86,48 +118,39 @@ struct shrinker { }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ -/* Flags */ -#define SHRINKER_REGISTERED (1 << 0) -#define SHRINKER_NUMA_AWARE (1 << 1) -#define SHRINKER_MEMCG_AWARE (1 << 2) +/* Internal flags */ +#define SHRINKER_REGISTERED BIT(0) +#define SHRINKER_ALLOCATED BIT(1) + +/* Flags for users to use */ +#define SHRINKER_NUMA_AWARE BIT(2) +#define SHRINKER_MEMCG_AWARE BIT(3) /* * It just makes sense when the shrinker is also MEMCG_AWARE for now, * non-MEMCG_AWARE shrinker should not have this flag set. */ -#define SHRINKER_NONSLAB (1 << 3) +#define SHRINKER_NONSLAB BIT(4) -extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker, - const char *fmt, ...); -extern void register_shrinker_prepared(struct shrinker *shrinker); -extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker, - const char *fmt, ...); -extern void unregister_shrinker(struct shrinker *shrinker); -extern void free_prealloced_shrinker(struct shrinker *shrinker); -extern void synchronize_shrinkers(void); +__printf(2, 3) +struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...); +void shrinker_register(struct shrinker *shrinker); +void shrinker_free(struct shrinker *shrinker); -#ifdef CONFIG_SHRINKER_DEBUG -extern int shrinker_debugfs_add(struct shrinker *shrinker); -extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, - int *debugfs_id); -extern void shrinker_debugfs_remove(struct dentry *debugfs_entry, - int debugfs_id); -extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker, - const char *fmt, ...); -#else /* CONFIG_SHRINKER_DEBUG */ -static inline int shrinker_debugfs_add(struct shrinker *shrinker) -{ - return 0; -} -static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, - int *debugfs_id) +static inline bool shrinker_try_get(struct shrinker *shrinker) { - *debugfs_id = -1; - return NULL; + return refcount_inc_not_zero(&shrinker->refcount); } -static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry, - int debugfs_id) + +static inline void shrinker_put(struct shrinker *shrinker) { + if (refcount_dec_and_test(&shrinker->refcount)) + complete(&shrinker->done); } + +#ifdef CONFIG_SHRINKER_DEBUG +extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker, + const char *fmt, ...); +#else /* CONFIG_SHRINKER_DEBUG */ static inline __printf(2, 3) int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{ diff --git a/include/linux/signal.h b/include/linux/signal.h index 3b98e7a28538..f19816832f05 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -3,6 +3,7 @@ #define _LINUX_SIGNAL_H #include <linux/bug.h> +#include <linux/list.h> #include <linux/signal_types.h> #include <linux/string.h> diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h index a70b2bdbf4d9..caf4f7a59ab9 100644 --- a/include/linux/signal_types.h +++ b/include/linux/signal_types.h @@ -6,7 +6,7 @@ * Basic signal handling related data type definitions: */ -#include <linux/list.h> +#include <linux/types.h> #include <uapi/linux/signal.h> typedef struct kernel_siginfo { diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 84aa448d8bb3..c3a00b967d18 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -47,8 +47,17 @@ #define SZ_8G _AC(0x200000000, ULL) #define SZ_16G _AC(0x400000000, ULL) #define SZ_32G _AC(0x800000000, ULL) +#define SZ_64G _AC(0x1000000000, ULL) +#define SZ_128G _AC(0x2000000000, ULL) +#define SZ_256G _AC(0x4000000000, ULL) +#define SZ_512G _AC(0x8000000000, ULL) #define SZ_1T _AC(0x10000000000, ULL) +#define SZ_2T _AC(0x20000000000, ULL) +#define SZ_4T _AC(0x40000000000, ULL) +#define SZ_8T _AC(0x80000000000, ULL) +#define SZ_16T _AC(0x100000000000, ULL) +#define SZ_32T _AC(0x200000000000, ULL) #define SZ_64T _AC(0x400000000000, ULL) #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4174c4b82d13..2dde34c29203 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -295,7 +295,7 @@ struct nf_bridge_info { u8 bridged_dnat:1; u8 sabotage_in_done:1; __u16 frag_max_size; - struct net_device *physindev; + int physinif; /* always valid & non-NULL from FORWARD on, for physdev match */ struct net_device *physoutdev; @@ -566,6 +566,15 @@ struct ubuf_info_msgzc { int mm_account_pinned_pages(struct mmpin *mmp, size_t size); void mm_unaccount_pinned_pages(struct mmpin *mmp); +/* Preserve some data across TX submission and completion. + * + * Note, this state is stored in the driver. Extending the layout + * might need some special care. + */ +struct xsk_tx_metadata_compl { + __u64 *tx_timestamp; +}; + /* This data is invariant across clones and lives at * the end of the header data, ie. at skb->end. */ @@ -578,7 +587,10 @@ struct skb_shared_info { /* Warning: this field is not always filled in (UFO)! */ unsigned short gso_segs; struct sk_buff *frag_list; - struct skb_shared_hwtstamps hwtstamps; + union { + struct skb_shared_hwtstamps hwtstamps; + struct xsk_tx_metadata_compl xsk_meta; + }; unsigned int gso_type; u32 tskey; @@ -742,7 +754,6 @@ typedef unsigned char *sk_buff_data_t; * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL * @cb: Control buffer. Free for use by every layer. 
Put private vars here * @_skb_refdst: destination entry (with norefcount bit) - * @sp: the security path, used for xfrm * @len: Length of actual data * @data_len: Data length * @mac_len: Length of link layer header @@ -776,7 +787,6 @@ typedef unsigned char *sk_buff_data_t; * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue) * @_sk_redir: socket redirection information for skmsg * @_nfct: Associated connection, if any (with nfctinfo bits) - * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @skb_iif: ifindex of device we arrived on * @tc_index: Traffic control index * @hash: the packet hash @@ -1057,7 +1067,7 @@ struct sk_buff { refcount_t users; #ifdef CONFIG_SKB_EXTENSIONS - /* only useable after checking ->active_extensions != 0 */ + /* only usable after checking ->active_extensions != 0 */ struct skb_ext *extensions; #endif }; @@ -1309,7 +1319,7 @@ struct sk_buff_fclones { * * Returns true if skb is a fast clone, and its clone is not freed. * Some drivers call skb_orphan() in their ndo_start_xmit(), - * so we also check that this didnt happen. + * so we also check that didn't happen. */ static inline bool skb_fclone_busy(const struct sock *sk, const struct sk_buff *skb) @@ -2016,7 +2026,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) * Copy shared buffers into a new sk_buff. We effectively do COW on * packets to handle cases where we have a local reader and forward * and a couple of other messy ones. The normal one is tcpdumping - * a packet thats being forwarded. + * a packet that's being forwarded. */ /** @@ -3299,7 +3309,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, unsigned int order) { /* This piece of code contains several assumptions. - * 1. This is for device Rx, therefor a cold page is preferred. + * 1. This is for device Rx, therefore a cold page is preferred. * 2. The expectation is the user wants a compound page. * 3. If requesting a order 0 page it will not be compound * due to the check to see if order has a value in prep_new_page @@ -3679,6 +3689,9 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l return __skb_put_padto(skb, len, true); } +bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i) + __must_check; + static inline int skb_add_data(struct sk_buff *skb, struct iov_iter *from, int copy) { @@ -3994,6 +4007,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features unsigned int offset); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len); +int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev); int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); @@ -4232,10 +4246,13 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, { const void *a = skb_metadata_end(skb_a); const void *b = skb_metadata_end(skb_b); - /* Using more efficient varaiant than plain call to memcmp(). */ -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 u64 diffs = 0; + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + BITS_PER_LONG != 64) + goto slow; + + /* Using more efficient variant than plain call to memcmp(). 
*/ switch (meta_len) { #define __it(x, op) (x -= sizeof(u##op)) #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) @@ -4255,11 +4272,11 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, fallthrough; case 4: diffs |= __it_diff(a, b, 32); break; + default: +slow: + return memcmp(a - meta_len, b - meta_len, meta_len); } return diffs; -#else - return memcmp(a - meta_len, b - meta_len, meta_len); -#endif } static inline bool skb_metadata_differs(const struct sk_buff *skb_a, diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index c1637515a8a4..e65ec3fd2799 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -100,12 +100,18 @@ struct sk_psock { void (*saved_close)(struct sock *sk, long timeout); void (*saved_write_space)(struct sock *sk); void (*saved_data_ready)(struct sock *sk); + /* psock_update_sk_prot may be called with restore=false many times + * so the handler must be safe for this case. It will be called + * exactly once with restore=true when the psock is being destroyed + * and psock refcnt is zero, but before an RCU grace period. + */ int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock, bool restore); struct proto *sk_proto; struct mutex work_mutex; struct sk_psock_work_state work_state; struct delayed_work work; + struct sock *sk_pair; struct rcu_work rwork; }; @@ -499,12 +505,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock) return !!psock->saved_data_ready; } -static inline bool sk_is_udp(const struct sock *sk) -{ - return sk->sk_type == SOCK_DGRAM && - sk->sk_protocol == IPPROTO_UDP; -} - #if IS_ENABLED(CONFIG_NET_SOCK_MSG) #define BPF_F_STRPARSER (1UL << 1) diff --git a/include/linux/slab.h b/include/linux/slab.h index 8228d1276a2f..b5f5ee8308d0 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -24,7 +24,7 @@ /* * Flags to pass to kmem_cache_create(). - * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. + * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op */ /* DEBUG: Perform (expensive) checks on alloc/free */ #define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U) @@ -245,8 +245,9 @@ DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) size_t ksize(const void *objp); #ifdef CONFIG_PRINTK -bool kmem_valid_obj(void *object); -void kmem_dump_obj(void *object); +bool kmem_dump_obj(void *object); +#else +static inline bool kmem_dump_obj(void *object) { return false; } #endif /* @@ -301,25 +302,15 @@ static inline unsigned int arch_slab_minalign(void) * Kmalloc array related definitions */ -#ifdef CONFIG_SLAB /* - * SLAB and SLUB directly allocates requests fitting in to an order-1 page + * SLUB directly allocates requests fitting in to an order-1 page * (PAGE_SIZE*2). Larger requests are passed to the page allocator. 
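The __skb_metadata_differs() rework above swaps an #if/#else block for IS_ENABLED() plus a goto to a slow label, so both paths are always parsed and type-checked while the optimizer discards the dead one. The same idiom in isolation (demo_wordwise_compare() is hypothetical):

static int demo_compare(const void *a, const void *b, size_t len)
{
        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
            BITS_PER_LONG != 64)
                goto slow;      /* constant-folded away; no #ifdef needed */

        return demo_wordwise_compare(a, b, len);       /* hypothetical */
slow:
        return memcmp(a, b, len);
}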
*/ #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) -#ifndef KMALLOC_SHIFT_LOW -#define KMALLOC_SHIFT_LOW 5 -#endif -#endif - -#ifdef CONFIG_SLUB -#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) +#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif -#endif /* Maximum allocatable size */ #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX) @@ -763,6 +754,8 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) __realloc_size(3); extern void kvfree(const void *addr); +DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T)) + extern void kvfree_sensitive(const void *addr, size_t len); unsigned int kmem_cache_size(struct kmem_cache *s); @@ -785,12 +778,4 @@ size_t kmalloc_size_roundup(size_t size); void __init kmem_cache_init_late(void); -#if defined(CONFIG_SMP) && defined(CONFIG_SLAB) -int slab_prepare_cpu(unsigned int cpu); -int slab_dead_cpu(unsigned int cpu); -#else -#define slab_prepare_cpu NULL -#define slab_dead_cpu NULL -#endif - #endif /* _LINUX_SLAB_H */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h deleted file mode 100644 index a61e7d55d0d3..000000000000 --- a/include/linux/slab_def.h +++ /dev/null @@ -1,124 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SLAB_DEF_H -#define _LINUX_SLAB_DEF_H - -#include <linux/kfence.h> -#include <linux/reciprocal_div.h> - -/* - * Definitions unique to the original Linux SLAB allocator. - */ - -struct kmem_cache { - struct array_cache __percpu *cpu_cache; - -/* 1) Cache tunables. Protected by slab_mutex */ - unsigned int batchcount; - unsigned int limit; - unsigned int shared; - - unsigned int size; - struct reciprocal_value reciprocal_buffer_size; -/* 2) touched by every alloc & free from the backend */ - - slab_flags_t flags; /* constant flags */ - unsigned int num; /* # of objs per slab */ - -/* 3) cache_grow/shrink */ - /* order of pgs per slab (2^n) */ - unsigned int gfporder; - - /* force GFP flags, e.g. GFP_DMA */ - gfp_t allocflags; - - size_t colour; /* cache colouring range */ - unsigned int colour_off; /* colour offset */ - unsigned int freelist_size; - - /* constructor func */ - void (*ctor)(void *obj); - -/* 4) cache creation/removal */ - const char *name; - struct list_head list; - int refcount; - int object_size; - int align; - -/* 5) statistics */ -#ifdef CONFIG_DEBUG_SLAB - unsigned long num_active; - unsigned long num_allocations; - unsigned long high_mark; - unsigned long grown; - unsigned long reaped; - unsigned long errors; - unsigned long max_freeable; - unsigned long node_allocs; - unsigned long node_frees; - unsigned long node_overflow; - atomic_t allochit; - atomic_t allocmiss; - atomic_t freehit; - atomic_t freemiss; - - /* - * If debugging is enabled, then the allocator can add additional - * fields and/or padding to every object. 'size' contains the total - * object size including these internal fields, while 'obj_offset' - * and 'object_size' contain the offset to the user object and its - * size. 
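The new DEFINE_FREE(kvfree, ...) line above hooks kvfree() into the cleanup.h scope-based-cleanup machinery. A minimal sketch of what that enables for callers (the function is invented for illustration):

#include <linux/cleanup.h>

static int demo_parse(size_t size)
{
        void *buf __free(kvfree) = kvmalloc(size, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        /* on every return path below, buf is kvfree()d automatically */
        return 0;
}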
- */ - int obj_offset; -#endif /* CONFIG_DEBUG_SLAB */ - -#ifdef CONFIG_KASAN_GENERIC - struct kasan_cache kasan_info; -#endif - -#ifdef CONFIG_SLAB_FREELIST_RANDOM - unsigned int *random_seq; -#endif - -#ifdef CONFIG_HARDENED_USERCOPY - unsigned int useroffset; /* Usercopy region offset */ - unsigned int usersize; /* Usercopy region size */ -#endif - - struct kmem_cache_node *node[MAX_NUMNODES]; -}; - -static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab, - void *x) -{ - void *object = x - (x - slab->s_mem) % cache->size; - void *last_object = slab->s_mem + (cache->num - 1) * cache->size; - - if (unlikely(object > last_object)) - return last_object; - else - return object; -} - -/* - * We want to avoid an expensive divide : (offset / cache->size) - * Using the fact that size is a constant for a particular cache, - * we can replace (offset / cache->size) by - * reciprocal_divide(offset, cache->reciprocal_buffer_size) - */ -static inline unsigned int obj_to_index(const struct kmem_cache *cache, - const struct slab *slab, void *obj) -{ - u32 offset = (obj - slab->s_mem); - return reciprocal_divide(offset, cache->reciprocal_buffer_size); -} - -static inline int objs_per_slab(const struct kmem_cache *cache, - const struct slab *slab) -{ - if (is_kfence_address(slab_address(slab))) - return 1; - return cache->num; -} - -#endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h deleted file mode 100644 index deb90cf4bffb..000000000000 --- a/include/linux/slub_def.h +++ /dev/null @@ -1,204 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SLUB_DEF_H -#define _LINUX_SLUB_DEF_H - -/* - * SLUB : A Slab allocator without object queues. - * - * (C) 2007 SGI, Christoph Lameter - */ -#include <linux/kfence.h> -#include <linux/kobject.h> -#include <linux/reciprocal_div.h> -#include <linux/local_lock.h> - -enum stat_item { - ALLOC_FASTPATH, /* Allocation from cpu slab */ - ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ - FREE_FASTPATH, /* Free to cpu slab */ - FREE_SLOWPATH, /* Freeing not to cpu slab */ - FREE_FROZEN, /* Freeing to frozen slab */ - FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ - FREE_REMOVE_PARTIAL, /* Freeing removes last object */ - ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */ - ALLOC_SLAB, /* Cpu slab acquired from page allocator */ - ALLOC_REFILL, /* Refill cpu slab from slab freelist */ - ALLOC_NODE_MISMATCH, /* Switching cpu slab */ - FREE_SLAB, /* Slab freed to the page allocator */ - CPUSLAB_FLUSH, /* Abandoning of the cpu slab */ - DEACTIVATE_FULL, /* Cpu slab was full when deactivated */ - DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */ - DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */ - DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ - DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ - DEACTIVATE_BYPASS, /* Implicit deactivation */ - ORDER_FALLBACK, /* Number of times fallback was necessary */ - CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */ - CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */ - CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */ - CPU_PARTIAL_FREE, /* Refill cpu partial on free */ - CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ - CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ - NR_SLUB_STAT_ITEMS -}; - -#ifndef CONFIG_SLUB_TINY -/* - * When changing the layout, make sure freelist and tid are 
still compatible - * with this_cpu_cmpxchg_double() alignment requirements. - */ -struct kmem_cache_cpu { - union { - struct { - void **freelist; /* Pointer to next available object */ - unsigned long tid; /* Globally unique transaction id */ - }; - freelist_aba_t freelist_tid; - }; - struct slab *slab; /* The slab from which we are allocating */ -#ifdef CONFIG_SLUB_CPU_PARTIAL - struct slab *partial; /* Partially allocated frozen slabs */ -#endif - local_lock_t lock; /* Protects the fields above */ -#ifdef CONFIG_SLUB_STATS - unsigned stat[NR_SLUB_STAT_ITEMS]; -#endif -}; -#endif /* CONFIG_SLUB_TINY */ - -#ifdef CONFIG_SLUB_CPU_PARTIAL -#define slub_percpu_partial(c) ((c)->partial) - -#define slub_set_percpu_partial(c, p) \ -({ \ - slub_percpu_partial(c) = (p)->next; \ -}) - -#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c)) -#else -#define slub_percpu_partial(c) NULL - -#define slub_set_percpu_partial(c, p) - -#define slub_percpu_partial_read_once(c) NULL -#endif // CONFIG_SLUB_CPU_PARTIAL - -/* - * Word size structure that can be atomically updated or read and that - * contains both the order and the number of objects that a slab of the - * given order would contain. - */ -struct kmem_cache_order_objects { - unsigned int x; -}; - -/* - * Slab cache management. - */ -struct kmem_cache { -#ifndef CONFIG_SLUB_TINY - struct kmem_cache_cpu __percpu *cpu_slab; -#endif - /* Used for retrieving partial slabs, etc. */ - slab_flags_t flags; - unsigned long min_partial; - unsigned int size; /* The size of an object including metadata */ - unsigned int object_size;/* The size of an object without metadata */ - struct reciprocal_value reciprocal_size; - unsigned int offset; /* Free pointer offset */ -#ifdef CONFIG_SLUB_CPU_PARTIAL - /* Number of per cpu partial objects to keep around */ - unsigned int cpu_partial; - /* Number of per cpu partial slabs to keep around */ - unsigned int cpu_partial_slabs; -#endif - struct kmem_cache_order_objects oo; - - /* Allocation and freeing of slabs */ - struct kmem_cache_order_objects min; - gfp_t allocflags; /* gfp flags to use on each alloc */ - int refcount; /* Refcount for slab cache destroy */ - void (*ctor)(void *); - unsigned int inuse; /* Offset to metadata */ - unsigned int align; /* Alignment */ - unsigned int red_left_pad; /* Left redzone padding size */ - const char *name; /* Name (only for display!) */ - struct list_head list; /* List of slab caches */ -#ifdef CONFIG_SYSFS - struct kobject kobj; /* For sysfs */ -#endif -#ifdef CONFIG_SLAB_FREELIST_HARDENED - unsigned long random; -#endif - -#ifdef CONFIG_NUMA - /* - * Defragmentation by allocating from a remote node. 
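Both deleted headers computed object indices with the reciprocal-divide trick, replacing a division by a per-cache constant with a multiply (see obj_to_index() above and below). The idiom in isolation, with illustrative variables:

#include <linux/reciprocal_div.h>

/* setup, once per cache: */
struct reciprocal_value rv = reciprocal_value(object_size);

/* hot path: offset / object_size without a hardware divide */
unsigned int idx = reciprocal_divide(offset, rv);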
- */ - unsigned int remote_node_defrag_ratio; -#endif - -#ifdef CONFIG_SLAB_FREELIST_RANDOM - unsigned int *random_seq; -#endif - -#ifdef CONFIG_KASAN_GENERIC - struct kasan_cache kasan_info; -#endif - -#ifdef CONFIG_HARDENED_USERCOPY - unsigned int useroffset; /* Usercopy region offset */ - unsigned int usersize; /* Usercopy region size */ -#endif - - struct kmem_cache_node *node[MAX_NUMNODES]; -}; - -#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY) -#define SLAB_SUPPORTS_SYSFS -void sysfs_slab_unlink(struct kmem_cache *); -void sysfs_slab_release(struct kmem_cache *); -#else -static inline void sysfs_slab_unlink(struct kmem_cache *s) -{ -} -static inline void sysfs_slab_release(struct kmem_cache *s) -{ -} -#endif - -void *fixup_red_left(struct kmem_cache *s, void *p); - -static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab, - void *x) { - void *object = x - (x - slab_address(slab)) % cache->size; - void *last_object = slab_address(slab) + - (slab->objects - 1) * cache->size; - void *result = (unlikely(object > last_object)) ? last_object : object; - - result = fixup_red_left(cache, result); - return result; -} - -/* Determine object index from a given position */ -static inline unsigned int __obj_to_index(const struct kmem_cache *cache, - void *addr, void *obj) -{ - return reciprocal_divide(kasan_reset_tag(obj) - addr, - cache->reciprocal_size); -} - -static inline unsigned int obj_to_index(const struct kmem_cache *cache, - const struct slab *slab, void *obj) -{ - if (is_kfence_address(obj)) - return 0; - return __obj_to_index(cache, slab_address(slab), obj); -} - -static inline int objs_per_slab(const struct kmem_cache *cache, - const struct slab *slab) -{ - return slab->objects; -} -#endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 91ea4a67f8ca..e87520dc2959 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, void *info, bool wait, const struct cpumask *mask); -int smp_call_function_single_async(int cpu, struct __call_single_data *csd); +int smp_call_function_single_async(int cpu, call_single_data_t *csd); /* * Cpus stopping functions in panic. All have default weak definitions. diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h index fc456f75c131..8c9ca857ccf6 100644 --- a/include/linux/soc/apple/rtkit.h +++ b/include/linux/soc/apple/rtkit.h @@ -161,24 +161,6 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message, struct completion *completion, bool atomic); /* - * Send a message to the given endpoint and wait until it has been submitted - * to the hardware FIFO. - * Will return zero on success and a negative error code on failure - * (e.g. -ETIME when the message couldn't be written within the given - * timeout) - * - * @rtk: RTKit reference - * @ep: target endpoint - * @message: message to be sent - * @timeout: timeout in milliseconds to allow the message transmission - * to be completed - * @atomic: if set to true this function can be called from atomic - * context. - */ -int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message, - unsigned long timeout, bool atomic); - -/* * Process incoming messages in atomic context. 
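The smp.h change above makes smp_call_function_single_async() take the properly aligned call_single_data_t typedef instead of the raw struct. A sketch of a caller under that signature (demo names are illustrative):

static void demo_ipi(void *info)
{
        /* runs on the target CPU from the IPI path */
}

static call_single_data_t demo_csd;

static int demo_kick(int cpu)
{
        INIT_CSD(&demo_csd, demo_ipi, NULL);
        /* returns -EBUSY if demo_csd is still in flight */
        return smp_call_function_single_async(cpu, &demo_csd);
}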
* This only guarantees that messages arrive as far as the recv_message_early * callback; drivers expecting to handle incoming messages synchronously diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 07f67b3d8e97..6c6cccc848f4 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -2,6 +2,47 @@ #ifndef __SOC_MEDIATEK_INFRACFG_H #define __SOC_MEDIATEK_INFRACFG_H +#define MT8365_INFRA_TOPAXI_PROTECTEN_STA1 0x228 +#define MT8365_INFRA_TOPAXI_PROTECTEN_SET 0x2a0 +#define MT8365_INFRA_TOPAXI_PROTECTEN_CLR 0x2a4 +#define MT8365_INFRA_TOPAXI_PROTECTEN_MM_M0 BIT(1) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MDMCU_M1 BIT(2) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MMAPB_S BIT(6) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_0 BIT(10) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_1 BIT(11) +#define MT8365_INFRA_TOPAXI_PROTECTEN_AP2CONN_AHB BIT(13) +#define MT8365_INFRA_TOPAXI_PROTECTEN_CONN2INFRA_AHB BIT(14) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MFG_M0 BIT(21) +#define MT8365_INFRA_TOPAXI_PROTECTEN_INFRA2MFG BIT(22) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_STA1 0x258 +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_SET 0x2a8 +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CLR 0x2ac +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU2AP BIT(2) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_0 BIT(16) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_1 BIT(17) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CONN2INFRA_AXI_GALS_MST BIT(18) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CAM2MM_AXI_GALS_MST BIT(19) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU_CBIP_GALS_MST BIT(20) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_INFRA2CONN_AHB_GALS_SLV BIT(21) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_INFRA_GALS_ADB BIT(24) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_MP1_L2C_AFIFO BIT(27) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_AUDIO_M BIT(28) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_M BIT(30) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_S BIT(31) + +#define MT8365_INFRA_NAO_TOPAXI_SI0_STA 0x0 +#define MT8365_INFRA_NAO_TOPAXI_SI0_CTRL_UPDATED BIT(24) +#define MT8365_INFRA_NAO_TOPAXI_SI2_STA 0x28 +#define MT8365_INFRA_NAO_TOPAXI_SI2_CTRL_UPDATED BIT(14) +#define MT8365_INFRA_TOPAXI_SI0_CTL 0x200 +#define MT8365_INFRA_TOPAXI_SI0_WAY_EN_MMAPB_S BIT(6) +#define MT8365_INFRA_TOPAXI_SI2_CTL 0x234 +#define MT8365_INFRA_TOPAXI_SI2_WAY_EN_PERI_M1 BIT(5) + +#define MT8365_SMI_COMMON_CLAMP_EN 0x3c0 +#define MT8365_SMI_COMMON_CLAMP_EN_SET 0x3c4 +#define MT8365_SMI_COMMON_CLAMP_EN_CLR 0x3c8 + #define MT8195_TOP_AXI_PROT_EN_STA1 0x228 #define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258 #define MT8195_TOP_AXI_PROT_EN_SET 0x2a0 diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h index 2475ef914746..4885b065b849 100644 --- a/include/linux/soc/mediatek/mtk-mmsys.h +++ b/include/linux/soc/mediatek/mtk-mmsys.h @@ -62,6 +62,14 @@ enum mtk_ddp_comp_id { DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_OVL1, + DDP_COMPONENT_PADDING0, + DDP_COMPONENT_PADDING1, + DDP_COMPONENT_PADDING2, + DDP_COMPONENT_PADDING3, + DDP_COMPONENT_PADDING4, + DDP_COMPONENT_PADDING5, + DDP_COMPONENT_PADDING6, + DDP_COMPONENT_PADDING7, DDP_COMPONENT_POSTMASK0, DDP_COMPONENT_PWM0, DDP_COMPONENT_PWM1, diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h index b2b28180dff7..a476648858a6 100644 --- 
a/include/linux/soc/mediatek/mtk_wed.h +++ b/include/linux/soc/mediatek/mtk_wed.h @@ -10,6 +10,7 @@ #define MTK_WED_TX_QUEUES 2 #define MTK_WED_RX_QUEUES 2 +#define MTK_WED_RX_PAGE_QUEUES 3 #define WED_WO_STA_REC 0x6 @@ -45,7 +46,7 @@ enum mtk_wed_wo_cmd { MTK_WED_WO_CMD_WED_END }; -struct mtk_rxbm_desc { +struct mtk_wed_bm_desc { __le32 buf0; __le32 token; } __packed __aligned(4); @@ -76,6 +77,11 @@ struct mtk_wed_wo_rx_stats { __le32 rx_drop_cnt; }; +struct mtk_wed_buf { + void *p; + dma_addr_t phy_addr; +}; + struct mtk_wed_device { #ifdef CONFIG_NET_MEDIATEK_SOC_WED const struct mtk_wed_ops *ops; @@ -94,17 +100,20 @@ struct mtk_wed_device { struct mtk_wed_ring txfree_ring; struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES]; + struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES]; + struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES]; + struct mtk_wed_ring ind_cmd_ring; struct { int size; - void **pages; + struct mtk_wed_buf *pages; struct mtk_wdma_desc *desc; dma_addr_t desc_phys; } tx_buf_ring; struct { int size; - struct mtk_rxbm_desc *desc; + struct mtk_wed_bm_desc *desc; dma_addr_t desc_phys; } rx_buf_ring; @@ -114,6 +123,13 @@ struct mtk_wed_device { dma_addr_t fdbk_phys; } rro; + struct { + int size; + struct mtk_wed_buf *pages; + struct mtk_wed_bm_desc *desc; + dma_addr_t desc_phys; + } hw_rro; + /* filled by driver: */ struct { union { @@ -123,6 +139,7 @@ struct mtk_wed_device { enum mtk_wed_bus_tye bus_type; void __iomem *base; u32 phy_base; + u32 id; u32 wpdma_phys; u32 wpdma_int; @@ -131,18 +148,35 @@ struct mtk_wed_device { u32 wpdma_txfree; u32 wpdma_rx_glo; u32 wpdma_rx; + u32 wpdma_rx_rro[MTK_WED_RX_QUEUES]; + u32 wpdma_rx_pg; bool wcid_512; + bool hw_rro; + bool msi; u16 token_start; unsigned int nbuf; unsigned int rx_nbuf; unsigned int rx_npkt; unsigned int rx_size; + unsigned int amsdu_max_len; u8 tx_tbit[MTK_WED_TX_QUEUES]; u8 rx_tbit[MTK_WED_RX_QUEUES]; + u8 rro_rx_tbit[MTK_WED_RX_QUEUES]; + u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES]; u8 txfree_tbit; + u8 amsdu_max_subframes; + + struct { + u8 se_group_nums; + u16 win_size; + u16 particular_sid; + u32 ack_sn_addr; + dma_addr_t particular_se_phys; + dma_addr_t addr_elem_phys[1024]; + } ind_cmd; u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); int (*offload_enable)(struct mtk_wed_device *wed); @@ -182,6 +216,14 @@ struct mtk_wed_ops { void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask); int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev, enum tc_setup_type type, void *type_data); + void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask, + bool reset); + void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs); + void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs); + int (*ind_rx_ring_setup)(struct mtk_wed_device *dev, + void __iomem *regs); }; extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; @@ -206,16 +248,27 @@ mtk_wed_device_attach(struct mtk_wed_device *dev) return ret; } -static inline bool -mtk_wed_get_rx_capa(struct mtk_wed_device *dev) +static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev) { #ifdef CONFIG_NET_MEDIATEK_SOC_WED + if (dev->version == 3) + return dev->wlan.hw_rro; + return dev->version != 1; #else return false; #endif } +static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev) +{ +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + return dev->version == 3; +#else + return false; +#endif +} + #ifdef CONFIG_NET_MEDIATEK_SOC_WED 
#define mtk_wed_device_active(_dev) !!(_dev)->ops #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) @@ -242,6 +295,15 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev) #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev) #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data) +#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \ + (_dev)->ops->start_hw_rro(_dev, _mask, _reset) +#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \ + (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs) +#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \ + (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs) +#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \ + (_dev)->ops->ind_rx_ring_setup(_dev, _regs) + #else static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) { @@ -261,6 +323,10 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) #define mtk_wed_device_stop(_dev) do {} while (0) #define mtk_wed_device_dma_reset(_dev) do {} while (0) #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP +#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0) +#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV +#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV +#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV #endif #endif diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 93417ba1ead4..1a886666bbb6 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -30,7 +30,7 @@ #define LLCC_NPU 23 #define LLCC_WLHW 24 #define LLCC_PIMEM 25 -#define LLCC_DRE 26 +#define LLCC_ECC 26 #define LLCC_CVP 28 #define LLCC_MODPE 29 #define LLCC_APTCM 30 diff --git a/include/linux/socket.h b/include/linux/socket.h index 39b74d83c7c4..cfcb7e2c3813 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -383,6 +383,7 @@ struct ucred { #define SOL_MPTCP 284 #define SOL_MCTP 285 #define SOL_SMC 286 +#define SOL_VSOCK 287 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index bae5e2369b4f..307961b41541 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -55,6 +55,29 @@ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) return copy_from_sockptr_offset(dst, src, 0, size); } +static inline int copy_struct_from_sockptr(void *dst, size_t ksize, + sockptr_t src, size_t usize) +{ + size_t size = min(ksize, usize); + size_t rest = max(ksize, usize) - size; + + if (!sockptr_is_kernel(src)) + return copy_struct_from_user(dst, ksize, src.user, size); + + if (usize < ksize) { + memset(dst + size, 0, rest); + } else if (usize > ksize) { + char *p = src.kernel; + + while (rest--) { + if (*p++) + return -E2BIG; + } + } + memcpy(dst, src.kernel, size); + return 0; +} + static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset, const void *src, size_t size) { diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 4f3d14bb1538..66f814b63a43 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -886,7 +886,8 @@ struct sdw_master_ops { * struct sdw_bus - SoundWire bus * @dev: Shortcut to &bus->md->dev to avoid changing the entire code. 
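The copy_struct_from_sockptr() helper added above gives setsockopt handlers copy_struct_from_user() semantics: a shorter userspace struct is zero-extended, while a longer one must be zero-padded beyond the kernel size or -E2BIG is returned. A hedged usage sketch (struct demo_opts is invented):

struct demo_opts {
        __u32 flags;
        __u32 timeout_ms;
};

static int demo_setsockopt(sockptr_t optval, unsigned int optlen)
{
        struct demo_opts opts;
        int err;

        /* tolerates old (smaller) and new (larger, zero-tailed) layouts */
        err = copy_struct_from_sockptr(&opts, sizeof(opts), optval, optlen);
        if (err)
                return err;
        return 0;
}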
* @md: Master device - * @link_id: Link id number, can be 0 to N, unique for each Master + * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used. + * @link_id: Link id number, can be 0 to N, unique for each Controller * @id: bus system-wide unique id * @slaves: list of Slaves on this bus * @assigned: Bitmap for Slave device numbers. @@ -918,6 +919,7 @@ struct sdw_master_ops { struct sdw_bus { struct device *dev; struct sdw_master_device *md; + int controller_id; unsigned int link_id; int id; struct list_head slaves; @@ -1040,7 +1042,7 @@ int sdw_compute_params(struct sdw_bus *bus); int sdw_stream_add_master(struct sdw_bus *bus, struct sdw_stream_config *stream_config, - struct sdw_port_config *port_config, + const struct sdw_port_config *port_config, unsigned int num_ports, struct sdw_stream_runtime *stream); int sdw_stream_remove_master(struct sdw_bus *bus, @@ -1062,7 +1064,7 @@ void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id int sdw_stream_add_slave(struct sdw_slave *slave, struct sdw_stream_config *stream_config, - struct sdw_port_config *port_config, + const struct sdw_port_config *port_config, unsigned int num_ports, struct sdw_stream_runtime *stream); int sdw_stream_remove_slave(struct sdw_slave *slave, @@ -1084,7 +1086,7 @@ int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val); static inline int sdw_stream_add_slave(struct sdw_slave *slave, struct sdw_stream_config *stream_config, - struct sdw_port_config *port_config, + const struct sdw_port_config *port_config, unsigned int num_ports, struct sdw_stream_runtime *stream) { diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 6b0a7dc48a4b..f866d5c8ed32 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -233,6 +233,8 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem) * limitations) * @supports_op: check if an operation is supported by the controller * @exec_op: execute a SPI memory operation + * not all driver provides supports_op(), so it can return -EOPNOTSUPP + * if the op is not supported by the driver/controller * @get_name: get a custom name for the SPI mem device from the controller. * This might be needed if the controller driver has been ported * to use the SPI mem layer and a custom name is used to keep diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 7f8b478fdeb3..600fbd5daf68 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -20,6 +20,9 @@ #include <uapi/linux/spi/spi.h> +/* Max no. of CS supported per spi device */ +#define SPI_CS_CNT_MAX 16 + struct dma_chan; struct software_node; struct ptp_system_timestamp; @@ -132,7 +135,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * @max_speed_hz: Maximum clock rate to be used with this chip * (on this board); may be changed by the device's driver. * The spi_transfer.speed_hz can override this for each transfer. - * @chip_select: Chipselect, distinguishing chips handled by @controller. + * @chip_select: Array of physical chipselect, spi->chipselect[i] gives + * the corresponding physical CS for logical CS i. * @mode: The spi mode defines how data is clocked out and in. * This may be changed by the device's driver. * The "active low" default for chipselect mode can be overridden @@ -157,8 +161,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * the device will bind to the named driver and only the named driver. 
* Do not set directly, because core frees it; use driver_set_override() to * set or clear it. - * @cs_gpiod: GPIO descriptor of the chipselect line (optional, NULL when - * not using a GPIO line) + * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines + * (optional, NULL when not using a GPIO line) * @word_delay: delay to be inserted between consecutive * words of a transfer * @cs_setup: delay to be introduced by the controller after CS is asserted @@ -167,6 +171,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * deasserted. If @cs_change_delay is used from @spi_transfer, then the * two delays will be added up. * @pcpu_statistics: statistics for the spi_device + * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array * * A @spi_device is used to interchange data between an SPI slave * (usually a discrete chip) and CPU memory. @@ -182,7 +187,7 @@ struct spi_device { struct spi_controller *controller; struct spi_controller *master; /* Compatibility layer */ u32 max_speed_hz; - u8 chip_select; + u8 chip_select[SPI_CS_CNT_MAX]; u8 bits_per_word; bool rt; #define SPI_NO_TX BIT(31) /* No transmit wire */ @@ -213,7 +218,7 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - struct gpio_desc *cs_gpiod; /* Chip select GPIO descriptor */ + struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */ struct spi_delay word_delay; /* Inter-word delay */ /* CS delays */ struct spi_delay cs_setup; @@ -223,6 +228,13 @@ struct spi_device { /* The statistics */ struct spi_statistics __percpu *pcpu_statistics; + /* Bit mask of the chipselect(s) that the driver need to use from + * the chipselect array.When the controller is capable to handle + * multiple chip selects & memories are connected in parallel + * then more than one bit need to be set in cs_index_mask. + */ + u32 cs_index_mask : SPI_CS_CNT_MAX; + /* * Likely need more hooks for more protocol options affecting how * the controller talks to each chip, like: @@ -279,22 +291,33 @@ static inline void *spi_get_drvdata(const struct spi_device *spi) static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx) { - return spi->chip_select; + return spi->chip_select[idx]; } static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect) { - spi->chip_select = chipselect; + spi->chip_select[idx] = chipselect; } static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx) { - return spi->cs_gpiod; + return spi->cs_gpiod[idx]; } static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod) { - spi->cs_gpiod = csgpiod; + spi->cs_gpiod[idx] = csgpiod; +} + +static inline bool spi_is_csgpiod(struct spi_device *spi) +{ + u8 idx; + + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + if (spi_get_csgpiod(spi, idx)) + return true; + } + return false; } /** @@ -399,6 +422,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @bus_lock_spinlock: spinlock for SPI bus locking * @bus_lock_mutex: mutex for exclusion of multiple callers * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use + * @multi_cs_cap: indicates that the SPI Controller can assert/de-assert + * more than one chip select at once. * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This * must fail if an unrecognized or unsupported mode is requested. 
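With chip_select and cs_gpiod now arrays indexed by logical chip select, the accessors take an idx argument and cs_index_mask says which slots a device drives. A rough controller-side sketch (the real core logic also handles CS polarity, which is omitted here):

static void demo_assert_cs(struct spi_device *spi, bool enable)
{
        u8 idx;

        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
                if (!(spi->cs_index_mask & BIT(idx)))
                        continue;
                if (spi_get_csgpiod(spi, idx))
                        gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
                                                 enable);
        }
}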
@@ -461,10 +486,13 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * - return 1 if the transfer is still in progress. When * the driver is finished with this transfer it must * call spi_finalize_current_transfer() so the subsystem - * can issue the next transfer. Note: transfer_one and - * transfer_one_message are mutually exclusive; when both - * are set, the generic subsystem does not call your - * transfer_one callback. + * can issue the next transfer. If the transfer fails, the + * driver must set the flag SPI_TRANS_FAIL_IO to + * spi_transfer->error first, before calling + * spi_finalize_current_transfer(). + * Note: transfer_one and transfer_one_message are mutually + * exclusive; when both are set, the generic subsystem does + * not call your transfer_one callback. * @handle_err: the subsystem calls the driver to handle an error that occurs * in the generic implementation of transfer_one_message(). * @mem_ops: optimized/dedicated operations for interactions with SPI memory. @@ -566,6 +594,12 @@ struct spi_controller { #define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */ #define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */ #define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */ +#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */ + /* + * The spi-controller has multi chip select capability and can + * assert/de-assert more than one chip select at once. + */ +#define SPI_CONTROLLER_MULTI_CS BIT(7) /* Flag indicating if the allocation of this struct is devres-managed */ bool devm_allocated; @@ -676,7 +710,8 @@ struct spi_controller { bool rt; bool auto_runtime_pm; bool cur_msg_mapped; - char last_cs; + char last_cs[SPI_CS_CNT_MAX]; + char last_cs_index_mask; bool last_cs_mode_high; bool fallback; struct completion xfer_completion; @@ -867,6 +902,7 @@ extern int devm_spi_register_controller(struct device *dev, extern void spi_unregister_controller(struct spi_controller *ctlr); #if IS_ENABLED(CONFIG_ACPI) +extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev); extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, struct acpi_device *adev, int index); @@ -1038,6 +1074,7 @@ struct spi_transfer { unsigned len; #define SPI_TRANS_FAIL_NO_START BIT(0) +#define SPI_TRANS_FAIL_IO BIT(1) u16 error; dma_addr_t tx_dma; @@ -1086,8 +1123,6 @@ struct spi_transfer { * @state: for use by whichever driver currently owns the message * @resources: for resource management when the SPI message is processed * @prepared: spi_prepare_message was called for the this message - * @t: for use with spi_message_alloc() when message and transfers have - * been allocated together * * A @spi_message is used to execute an atomic sequence of data transfers, * each represented by a struct spi_transfer. 
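The reworked transfer_one() contract above asks drivers to set SPI_TRANS_FAIL_IO in spi_transfer->error before finalizing a failed transfer. A hedged sketch of that error path (demo_start_dma() is hypothetical):

static int demo_transfer_one(struct spi_controller *ctlr,
                             struct spi_device *spi,
                             struct spi_transfer *xfer)
{
        if (demo_start_dma(ctlr, xfer) < 0) {
                xfer->error |= SPI_TRANS_FAIL_IO;  /* per the new contract */
                spi_finalize_current_transfer(ctlr);
                return 1;
        }
        return 1;   /* completion IRQ calls spi_finalize_current_transfer() */
}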
The sequence is "atomic" @@ -1142,9 +1177,6 @@ struct spi_message { /* List of spi_res resources when the SPI message is processed */ struct list_head resources; - - /* For embedding transfers into the memory of the message */ - struct spi_transfer t[]; }; static inline void spi_message_init_no_memset(struct spi_message *m) @@ -1203,17 +1235,21 @@ struct spi_transfer *xfers, unsigned int num_xfers) */ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags) { - struct spi_message *m; + struct spi_message_with_transfers { + struct spi_message m; + struct spi_transfer t[]; + } *mwt; + unsigned i; + + mwt = kzalloc(struct_size(mwt, t, ntrans), flags); + if (!mwt) + return NULL; - m = kzalloc(struct_size(m, t, ntrans), flags); - if (m) { - unsigned i; + spi_message_init_no_memset(&mwt->m); + for (i = 0; i < ntrans; i++) + spi_message_add_tail(&mwt->t[i], &mwt->m); - spi_message_init_no_memset(m); - for (i = 0; i < ntrans; i++) - spi_message_add_tail(&m->t[i], m); - } - return m; + return &mwt->m; } static inline void spi_message_free(struct spi_message *m) @@ -1637,8 +1673,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) /* Compatibility layer */ #define spi_master spi_controller -#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX - #define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr) #define spi_master_set_devdata(_ctlr, _data) \ spi_controller_set_devdata(_ctlr, _data) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 31d3d747a9db..3fcd20de6ca8 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -456,6 +456,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock) #endif /* CONFIG_PREEMPT_RT */ /* + * Does a critical section need to be broken due to another + * task waiting?: (technically does not depend on CONFIG_PREEMPTION, + * but a general need for low latency) + */ +static inline int spin_needbreak(spinlock_t *lock) +{ +#ifdef CONFIG_PREEMPTION + return spin_is_contended(lock); +#else + return 0; +#endif +} + +/* + * Check if a rwlock is contended. + * Returns non-zero if there is another task waiting on the rwlock. + * Returns zero if the lock is not contended or the system / underlying + * rwlock implementation does not support contention detection. + * Technically does not depend on CONFIG_PREEMPTION, but a general need + * for low latency. 
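Together with spin_needbreak() here, this hunk (continued just below) adds cleanup.h guard classes for spinlocks and rwlocks, including conditional _try variants. A usage sketch, assuming the companion scoped_cond_guard() helper from the same cleanup.h series:

struct demo_dev {
        spinlock_t lock;
        int count;
};

static int demo_bump(struct demo_dev *d)
{
        guard(spinlock_irqsave)(&d->lock);
        return ++d->count;              /* lock dropped at scope exit */
}

static int demo_try_bump(struct demo_dev *d)
{
        scoped_cond_guard(spinlock_try, return -EBUSY, &d->lock)
                d->count++;
        return 0;
}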
+ */ +static inline int rwlock_needbreak(rwlock_t *lock) +{ +#ifdef CONFIG_PREEMPTION + return rwlock_is_contended(lock); +#else + return 0; +#endif +} + +/* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) */ @@ -507,6 +538,8 @@ DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t, raw_spin_lock(_T->lock), raw_spin_unlock(_T->lock)) +DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock)) + DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t, raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING), raw_spin_unlock(_T->lock)) @@ -515,23 +548,62 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, raw_spin_lock_irq(_T->lock), raw_spin_unlock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock)) + DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, raw_spin_lock_irqsave(_T->lock, _T->flags), raw_spin_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) +DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try, + raw_spin_trylock_irqsave(_T->lock, _T->flags)) + DEFINE_LOCK_GUARD_1(spinlock, spinlock_t, spin_lock(_T->lock), spin_unlock(_T->lock)) +DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock)) + DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t, spin_lock_irq(_T->lock), spin_unlock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try, + spin_trylock_irq(_T->lock)) + DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t, spin_lock_irqsave(_T->lock, _T->flags), spin_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) +DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try, + spin_trylock_irqsave(_T->lock, _T->flags)) + +DEFINE_LOCK_GUARD_1(read_lock, rwlock_t, + read_lock(_T->lock), + read_unlock(_T->lock)) + +DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t, + read_lock_irq(_T->lock), + read_unlock_irq(_T->lock)) + +DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t, + read_lock_irqsave(_T->lock, _T->flags), + read_unlock_irqrestore(_T->lock, _T->flags), + unsigned long flags) + +DEFINE_LOCK_GUARD_1(write_lock, rwlock_t, + write_lock(_T->lock), + write_unlock(_T->lock)) + +DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t, + write_lock_irq(_T->lock), + write_unlock_irq(_T->lock)) + +DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t, + write_lock_irqsave(_T->lock, _T->flags), + write_unlock_irqrestore(_T->lock, _T->flags), + unsigned long flags) + #undef __LINUX_INSIDE_SPINLOCK_H #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/splice.h b/include/linux/splice.h index 6c461573434d..9dec4861d09f 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -68,28 +68,37 @@ typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, typedef int (splice_direct_actor)(struct pipe_inode_info *, struct splice_desc *); -extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *, - loff_t *, size_t, unsigned int, - splice_actor *); -extern ssize_t __splice_from_pipe(struct pipe_inode_info *, - struct splice_desc *, splice_actor *); -extern ssize_t splice_to_pipe(struct pipe_inode_info *, - struct splice_pipe_desc *); -extern ssize_t add_to_pipe(struct pipe_inode_info *, - struct pipe_buffer *); -long vfs_splice_read(struct file *in, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, - unsigned int flags); -extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, - splice_direct_actor *); -extern long do_splice(struct file *in, loff_t *off_in, - struct file *out, loff_t *off_out, - size_t len, unsigned int flags); +ssize_t 
splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags, + splice_actor *actor); +ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, + struct splice_desc *sd, splice_actor *actor); +ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + struct splice_pipe_desc *spd); +ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf); +ssize_t vfs_splice_read(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags); +ssize_t splice_direct_to_actor(struct file *file, struct splice_desc *sd, + splice_direct_actor *actor); +ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out, + loff_t *off_out, size_t len, unsigned int flags); +ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + loff_t *opos, size_t len, unsigned int flags); +ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out, + loff_t *opos, size_t len); -extern long do_tee(struct file *in, struct file *out, size_t len, - unsigned int flags); -extern ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out, - loff_t *ppos, size_t len, unsigned int flags); +static inline long splice_copy_file_range(struct file *in, loff_t pos_in, + struct file *out, loff_t pos_out, + size_t len) +{ + return splice_file_range(in, &pos_in, out, &pos_out, len); +} + +ssize_t do_tee(struct file *in, struct file *out, size_t len, + unsigned int flags); +ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags); /* * for dynamic pipe sizing diff --git a/include/linux/spmi.h b/include/linux/spmi.h index eac1956a8727..28e8c8bd3944 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -120,6 +120,9 @@ static inline void spmi_controller_put(struct spmi_controller *ctrl) int spmi_controller_add(struct spmi_controller *ctrl); void spmi_controller_remove(struct spmi_controller *ctrl); +struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size); +int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl); + /** * struct spmi_driver - SPMI slave device driver * @driver: SPMI device drivers should initialize name and owner field of @@ -166,7 +169,7 @@ static inline void spmi_driver_unregister(struct spmi_driver *sdrv) struct device_node; -struct spmi_device *spmi_device_from_of(struct device_node *np); +struct spmi_device *spmi_find_device_by_of_node(struct device_node *np); int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf); int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf, size_t len); diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 127ef3b2e607..236610e4a8fa 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -229,7 +229,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp srcu_check_nmi_safety(ssp, true); retval = __srcu_read_lock_nmisafe(ssp); - rcu_lock_acquire(&ssp->dep_map); + rcu_try_lock_acquire(&ssp->dep_map); return retval; } diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index e58306783d8e..adcbb8f23600 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -11,8 +11,6 @@ * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free * stack traces often repeat, using stack depot allows to save about 100x space. * - * Stack traces are never removed from the stack depot. 
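The new devm_spmi_controller_alloc()/devm_spmi_controller_add() pair above lets a driver tie controller lifetime to its device. A probe() sketch under the assumption that the devres variant reports failure via ERR_PTR(); the callbacks and private struct are illustrative:

static int demo_probe(struct platform_device *pdev)
{
        struct spmi_controller *ctrl;

        /* assumption: failure is reported via ERR_PTR(), not NULL */
        ctrl = devm_spmi_controller_alloc(&pdev->dev,
                                          sizeof(struct demo_priv));
        if (IS_ERR(ctrl))
                return PTR_ERR(ctrl);

        ctrl->read_cmd = demo_read_cmd;         /* hypothetical callbacks */
        ctrl->write_cmd = demo_write_cmd;

        return devm_spmi_controller_add(&pdev->dev, ctrl);
}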
- * * Author: Alexander Potapenko <glider@google.com> * Copyright (C) 2016 Google, Inc. * @@ -32,6 +30,18 @@ typedef u32 depot_stack_handle_t; */ #define STACK_DEPOT_EXTRA_BITS 5 +typedef u32 depot_flags_t; + +/* + * Flags that can be passed to stack_depot_save_flags(); see the comment next + * to its declaration for more details. + */ +#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001) +#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002) + +#define STACK_DEPOT_FLAGS_NUM 2 +#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1)) + /* * Using stack depot requires its initialization, which can be done in 3 ways: * @@ -69,31 +79,39 @@ static inline int stack_depot_early_init(void) { return 0; } #endif /** - * __stack_depot_save - Save a stack trace to stack depot + * stack_depot_save_flags - Save a stack trace to stack depot * * @entries: Pointer to the stack trace * @nr_entries: Number of frames in the stack * @alloc_flags: Allocation GFP flags - * @can_alloc: Allocate stack pools (increased chance of failure if false) + * @depot_flags: Stack depot flags + * + * Saves a stack trace from @entries array of size @nr_entries. + * + * If STACK_DEPOT_FLAG_CAN_ALLOC is set in @depot_flags, stack depot can + * replenish the stack pools in case no space is left (allocates using GFP + * flags of @alloc_flags). Otherwise, stack depot avoids any allocations and + * fails if no space is left to store the stack trace. * - * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is - * %true, stack depot can replenish the stack pools in case no space is left - * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids - * any allocations and fails if no space is left to store the stack trace. + * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment + * the refcount on the saved stack trace if it already exists in stack depot. + * Users of this flag must also call stack_depot_put() when keeping the stack + * trace is no longer required to avoid overflowing the refcount. * * If the provided stack trace comes from the interrupt context, only the part * up to the interrupt entry is saved. * - * Context: Any context, but setting @can_alloc to %false is required if + * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if * alloc_pages() cannot be used from the current context. Currently * this is the case for contexts where neither %GFP_ATOMIC nor * %GFP_NOWAIT can be used (NMI, raw_spin_lock). * * Return: Handle of the stack struct stored in depot, 0 on failure */ -depot_stack_handle_t __stack_depot_save(unsigned long *entries, - unsigned int nr_entries, - gfp_t gfp_flags, bool can_alloc); +depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, + unsigned int nr_entries, + gfp_t gfp_flags, + depot_flags_t depot_flags); /** * stack_depot_save - Save a stack trace to stack depot @@ -102,8 +120,11 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, * @nr_entries: Number of frames in the stack * @alloc_flags: Allocation GFP flags * - * Context: Contexts where allocations via alloc_pages() are allowed. - * See __stack_depot_save() for more details. + * Does not increment the refcount on the saved stack trace; see + * stack_depot_save_flags() for more details. + * + * Context: Contexts where allocations via alloc_pages() are allowed; + * see stack_depot_save_flags() for more details. 
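The GET flag turns stack depot records into refcounted objects: each stack_depot_save_flags(..., STACK_DEPOT_FLAG_GET) takes a reference that a later stack_depot_put() drops, allowing eviction. A sketch of the pattern (the buffer size and GFP flags are illustrative):

static depot_stack_handle_t demo_save(void)
{
        unsigned long entries[16];
        unsigned int nr;

        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save_flags(entries, nr, GFP_NOWAIT,
                                      STACK_DEPOT_FLAG_CAN_ALLOC |
                                      STACK_DEPOT_FLAG_GET);
}

static void demo_release(depot_stack_handle_t handle)
{
        if (handle)
                stack_depot_put(handle);  /* may evict the record */
}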
* * Return: Handle of the stack trace stored in depot, 0 on failure */ @@ -142,6 +163,18 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size, int spaces); /** + * stack_depot_put - Drop a reference to a stack trace from stack depot + * + * @handle: Stack depot handle returned from stack_depot_save() + * + * The stack trace is evicted from stack depot once all references to it have + * been dropped (once the number of stack_depot_evict() calls matches the + * number of stack_depot_save_flags() calls with STACK_DEPOT_FLAG_GET set for + * this stack trace). + */ +void stack_depot_put(depot_stack_handle_t handle); + +/** * stack_depot_set_extra_bits - Set extra bits in a stack depot handle * * @handle: Stack depot handle returned from stack_depot_save() diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h index c36e7a3b45e7..3be2cb564710 100644 --- a/include/linux/stackleak.h +++ b/include/linux/stackleak.h @@ -14,6 +14,7 @@ #ifdef CONFIG_GCC_PLUGIN_STACKLEAK #include <asm/stacktrace.h> +#include <linux/linkage.h> /* * The lowest address on tsk's stack which we can plausibly erase. @@ -76,6 +77,11 @@ static inline void stackleak_task_init(struct task_struct *t) # endif } +asmlinkage void noinstr stackleak_erase(void); +asmlinkage void noinstr stackleak_erase_on_task_stack(void); +asmlinkage void noinstr stackleak_erase_off_task_stack(void); +void __no_caller_saved_registers noinstr stackleak_track_stack(void); + #else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ static inline void stackleak_task_init(struct task_struct *t) { } #endif diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index ce89cc3e4913..dee5ad6e48c5 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -139,6 +139,7 @@ struct stmmac_rxq_cfg { struct stmmac_txq_cfg { u32 weight; + bool coe_unsupported; u8 mode_to_use; /* Credit Base Shaper parameters */ u32 send_slope; @@ -174,6 +175,7 @@ struct stmmac_fpe_cfg { bool hs_enable; /* FPE handshake enable */ enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */ enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */ + u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */ }; struct stmmac_safety_feature_cfg { @@ -302,7 +304,6 @@ struct plat_stmmacenet_data { unsigned int eee_usecs_rate; struct pci_dev *pdev; int int_snapshot_num; - int ext_snapshot_num; int msi_mac_vec; int msi_wol_vec; int msi_lpi_vec; diff --git a/include/linux/string.h b/include/linux/string.h index dbfc66400050..ab148d8dbfc1 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -2,10 +2,13 @@ #ifndef _LINUX_STRING_H_ #define _LINUX_STRING_H_ +#include <linux/array_size.h> #include <linux/compiler.h> /* for inline */ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ +#include <linux/err.h> /* for ERR_PTR() */ #include <linux/errno.h> /* for E2BIG */ +#include <linux/overflow.h> /* for check_mul_overflow() */ #include <linux/stdarg.h> #include <uapi/linux/string.h> @@ -14,6 +17,44 @@ extern void *memdup_user(const void __user *, size_t); extern void *vmemdup_user(const void __user *, size_t); extern void *memdup_user_nul(const void __user *, size_t); +/** + * memdup_array_user - duplicate array from user space + * @src: source address in user space + * @n: number of array members to copy + * @size: size of one array member + * + * Return: an ERR_PTR() on failure. Result is physically + * contiguous, to be freed by kfree(). 
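The memdup_array_user()/vmemdup_array_user() helpers defined just below fold the n * size overflow check into the copy itself. A hedged caller sketch with invented names:

static long demo_set_rules(const void __user *uarg, size_t nrules)
{
        struct demo_rule *rules;

        rules = memdup_array_user(uarg, nrules, sizeof(*rules));
        if (IS_ERR(rules))
                return PTR_ERR(rules);

        /* ... validate and install ... */

        kfree(rules);
        return 0;
}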
+ */ +static inline void *memdup_array_user(const void __user *src, size_t n, size_t size) +{ + size_t nbytes; + + if (check_mul_overflow(n, size, &nbytes)) + return ERR_PTR(-EOVERFLOW); + + return memdup_user(src, nbytes); +} + +/** + * vmemdup_array_user - duplicate array from user space + * @src: source address in user space + * @n: number of array members to copy + * @size: size of one array member + * + * Return: an ERR_PTR() on failure. Result may be not + * physically contiguous. Use kvfree() to free. + */ +static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size) +{ + size_t nbytes; + + if (check_mul_overflow(n, size, &nbytes)) + return ERR_PTR(-EOVERFLOW); + + return vmemdup_user(src, nbytes); +} + /* * Include machine specific inline routines */ @@ -25,9 +66,6 @@ extern char * strcpy(char *,const char *); #ifndef __HAVE_ARCH_STRNCPY extern char * strncpy(char *,const char *, __kernel_size_t); #endif -#ifndef __HAVE_ARCH_STRLCPY -size_t strlcpy(char *, const char *, size_t); -#endif #ifndef __HAVE_ARCH_STRSCPY ssize_t strscpy(char *, const char *, size_t); #endif @@ -277,10 +315,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, */ #define strtomem_pad(dest, src, pad) do { \ const size_t _dest_len = __builtin_object_size(dest, 1); \ + const size_t _src_len = __builtin_object_size(src, 1); \ \ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ _dest_len == (size_t)-1); \ - memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \ + memcpy_and_pad(dest, _dest_len, src, \ + strnlen(src, min(_src_len, _dest_len)), pad); \ } while (0) /** @@ -298,10 +338,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, */ #define strtomem(dest, src) do { \ const size_t _dest_len = __builtin_object_size(dest, 1); \ + const size_t _src_len = __builtin_object_size(src, 1); \ \ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ _dest_len == (size_t)-1); \ - memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \ + memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \ } while (0) /** diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 9d1f5bb74dd5..58fb1f90eda5 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -24,8 +24,8 @@ enum string_size_units { STRING_UNITS_2, /* use binary powers of 2^10 */ }; -void string_get_size(u64 size, u64 blk_size, enum string_size_units units, - char *buf, int len); +int string_get_size(u64 size, u64 blk_size, enum string_size_units units, + char *buf, int len); int parse_int_array_user(const char __user *from, size_t count, int **array); diff --git a/include/linux/stringify.h b/include/linux/stringify.h index 841cec8ed525..0e84cbe65270 100644 --- a/include/linux/stringify.h +++ b/include/linux/stringify.h @@ -9,4 +9,6 @@ #define __stringify_1(x...) #x #define __stringify(x...) 
__stringify_1(x) +#define FILE_LINE __FILE__ ":" __stringify(__LINE__) + #endif /* !__LINUX_STRINGIFY_H */ diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index db30a159f9d5..f22bf915dcf6 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -20,7 +20,8 @@ #ifdef CONFIG_SUNRPC_BACKCHANNEL struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid); void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); -void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task); +void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task, + const struct rpc_timeout *to); void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index af7358277f1c..5e9d1469c6fa 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -92,6 +92,7 @@ struct rpc_clnt { }; const struct cred *cl_cred; unsigned int cl_max_connect; /* max number of transports not to the same IP */ + struct super_block *pipefs_sb; }; /* @@ -251,7 +252,6 @@ void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *, const char *rpc_proc_name(const struct rpc_task *task); -void rpc_clnt_xprt_switch_put(struct rpc_clnt *); void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 8ada7dc802d3..2d61987b3545 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -38,6 +38,17 @@ struct rpc_wait { }; /* + * This describes a timeout strategy + */ +struct rpc_timeout { + unsigned long to_initval, /* initial timeout */ + to_maxval, /* max timeout */ + to_increment; /* if !exponential */ + unsigned int to_retries; /* max # of retries */ + unsigned char to_exponential; +}; + +/* * This is the RPC task struct */ struct rpc_task { @@ -205,7 +216,8 @@ struct rpc_wait_queue { */ struct rpc_task *rpc_new_task(const struct rpc_task_setup *); struct rpc_task *rpc_run_task(const struct rpc_task_setup *); -struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req); +struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, + struct rpc_timeout *timeout); void rpc_put_task(struct rpc_task *); void rpc_put_task_async(struct rpc_task *); bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index dbf5b21feafe..67cf1c9efd80 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -17,6 +17,7 @@ #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/svcauth.h> +#include <linux/lwq.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/pagevec.h> @@ -33,10 +34,10 @@ */ struct svc_pool { unsigned int sp_id; /* pool id; also node id on NUMA */ - spinlock_t sp_lock; /* protects all fields */ - struct list_head sp_sockets; /* pending sockets */ - unsigned int sp_nrthreads; /* # of threads in pool */ + struct lwq sp_xprts; /* pending transports */ + atomic_t sp_nrthreads; /* # of threads in pool */ struct list_head sp_all_threads; /* all server threads */ + struct llist_head sp_idle_threads; /* idle server threads */ /* statistics on pool operation */ struct percpu_counter 
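FILE_LINE, added just above, glues __FILE__ to the stringified __LINE__ at preprocessing time; the timer code later in this document uses it to name lockdep maps. A quick illustration (hypothetical location):

	/* In drivers/foo/bar.c at line 42 ... */
	pr_debug("reached " FILE_LINE "\n");	/* format becomes "reached drivers/foo/bar.c:42\n" */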
sp_messages_arrived; @@ -49,7 +50,8 @@ struct svc_pool { /* bits for sp_flags */ enum { SP_TASK_PENDING, /* still work to do even if no xprt is queued */ - SP_CONGESTED, /* all threads are busy, none idle */ + SP_NEED_VICTIM, /* One thread needs to agree to exit */ + SP_VICTIM_REMAINS, /* One thread needs to actually exit */ }; @@ -67,7 +69,6 @@ struct svc_serv { struct svc_program * sv_program; /* RPC program */ struct svc_stat * sv_stats; /* RPC statistics */ spinlock_t sv_lock; - struct kref sv_refcnt; unsigned int sv_nrthreads; /* # of server threads */ unsigned int sv_maxconn; /* max connections allowed or * '0' causing max to be based @@ -88,41 +89,20 @@ struct svc_serv { int (*sv_threadfn)(void *data); #if defined(CONFIG_SUNRPC_BACKCHANNEL) - struct list_head sv_cb_list; /* queue for callback requests + struct lwq sv_cb_list; /* queue for callback requests * that arrive over the same * connection */ - spinlock_t sv_cb_lock; /* protects the svc_cb_list */ - wait_queue_head_t sv_cb_waitq; /* sleep here if there are no - * entries in the svc_cb_list */ bool sv_bc_enabled; /* service uses backchannel */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ }; -/** - * svc_get() - increment reference count on a SUNRPC serv - * @serv: the svc_serv to have count incremented - * - * Returns: the svc_serv that was passed in. - */ -static inline struct svc_serv *svc_get(struct svc_serv *serv) -{ - kref_get(&serv->sv_refcnt); - return serv; -} +/* This is used by pool_stats to find and lock an svc */ +struct svc_info { + struct svc_serv *serv; + struct mutex *mutex; +}; -void svc_destroy(struct kref *); - -/** - * svc_put - decrement reference count on a SUNRPC serv - * @serv: the svc_serv to have count decremented - * - * When the reference count reaches zero, svc_destroy() - * is called to clean up and free the serv. - */ -static inline void svc_put(struct svc_serv *serv) -{ - kref_put(&serv->sv_refcnt, svc_destroy); -} +void svc_destroy(struct svc_serv **svcp); /* * Maximum payload size supported by a kernel RPC server. @@ -186,6 +166,7 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp); */ struct svc_rqst { struct list_head rq_all; /* all threads list */ + struct llist_node rq_idle; /* On the idle list */ struct rcu_head rq_rcu_head; /* for RCU deferred kfree */ struct svc_xprt * rq_xprt; /* transport ptr */ @@ -250,7 +231,10 @@ struct svc_rqst { struct net *rq_bc_net; /* pointer to backchannel's * net namespace */ + unsigned long bc_to_initval; + unsigned int bc_to_retries; void ** rq_lease_breaker; /* The v4 client breaking a lease */ + unsigned int rq_status_counter; /* RPC processing counter */ }; /* bits for rq_flags */ @@ -259,10 +243,7 @@ enum { RQ_LOCAL, /* local request */ RQ_USEDEFERRAL, /* use deferral */ RQ_DROPME, /* drop current reply */ - RQ_SPLICE_OK, /* turned off in gss privacy to prevent - * encrypting page cache pages */ - RQ_VICTIM, /* about to be shut down */ - RQ_BUSY, /* request is busy */ + RQ_VICTIM, /* Have agreed to shut down */ RQ_DATA, /* request has data */ }; @@ -301,6 +282,28 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst) return (struct sockaddr *) &rqst->rq_daddr; } +/** + * svc_thread_should_stop - check if this thread should stop + * @rqstp: the thread that might need to stop + * + * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS + * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming + * the victim using this function. 
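Note the lifetime change above: with sv_refcnt gone, svc_get()/svc_put() disappear and the single owner frees the service through svc_destroy(), which now takes a double pointer so it can clear the caller's reference. A teardown sketch (assumes 'serv' came from svc_create_pooled() and an owner-side mutex, both hypothetical here):

	mutex_lock(&example_mutex);
	svc_set_num_threads(serv, NULL, 0);	/* stop every pool thread */
	svc_destroy(&serv);			/* frees it; serv is now NULL */
	mutex_unlock(&example_mutex);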
It should then promptly call + * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS + * so the task waiting for a thread to exit can wake and continue. + * + * Return values: + * %true: caller should invoke svc_exit_thread() + * %false: caller should do nothing + */ +static inline bool svc_thread_should_stop(struct svc_rqst *rqstp) +{ + if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags)) + set_bit(RQ_VICTIM, &rqstp->rq_flags); + + return test_bit(RQ_VICTIM, &rqstp->rq_flags); +} + struct svc_deferred_req { u32 prot; /* protocol (UDP or TCP) */ struct svc_xprt *xprt; @@ -411,10 +414,9 @@ void svc_exit_thread(struct svc_rqst *); struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, int (*threadfn)(void *data)); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); -int svc_pool_stats_open(struct svc_serv *serv, struct file *file); +int svc_pool_stats_open(struct svc_info *si, struct file *file); void svc_process(struct svc_rqst *rqstp); -int bc_svc_process(struct svc_serv *, struct rpc_rqst *, - struct svc_rqst *); +void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp); int svc_register(const struct svc_serv *, struct net *, const int, const unsigned short, const unsigned short); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index a5ee0af2a310..e7595ae62fe2 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -65,6 +65,7 @@ extern unsigned int svcrdma_ord; extern unsigned int svcrdma_max_requests; extern unsigned int svcrdma_max_bc_requests; extern unsigned int svcrdma_max_req_size; +extern struct workqueue_struct *svcrdma_wq; extern struct percpu_counter svcrdma_stat_read; extern struct percpu_counter svcrdma_stat_recv; @@ -97,6 +98,7 @@ struct svcxprt_rdma { u32 sc_pending_recvs; u32 sc_recv_batch; struct list_head sc_rq_dto_q; + struct list_head sc_read_complete_q; spinlock_t sc_rq_dto_lock; struct ib_qp *sc_qp; struct ib_cq *sc_rq_cq; @@ -115,6 +117,13 @@ struct svcxprt_rdma { /* sc_flags */ #define RDMAXPRT_CONN_PENDING 3 +static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp) +{ + struct svc_xprt *xprt = rqstp->rq_xprt; + + return container_of(xprt, struct svcxprt_rdma, sc_xprt); +} + /* * Default connection parameters */ @@ -126,6 +135,43 @@ enum { #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD +/** + * svc_rdma_recv_cid_init - Initialize a Receive Queue completion ID + * @rdma: controlling transport + * @cid: completion ID to initialize + */ +static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma, + struct rpc_rdma_cid *cid) +{ + cid->ci_queue_id = rdma->sc_rq_cq->res.id; + cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); +} + +/** + * svc_rdma_send_cid_init - Initialize a Send Queue completion ID + * @rdma: controlling transport + * @cid: completion ID to initialize + */ +static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma, + struct rpc_rdma_cid *cid) +{ + cid->ci_queue_id = rdma->sc_sq_cq->res.id; + cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); +} + +/* + * A chunk context tracks all I/O for moving one Read or Write + * chunk. This is a set of rdma_rw's that handle data movement + * for all segments of one chunk. 
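With svc_thread_should_stop() in place, a service thread's main loop reduces to the shape below (a sketch modelled on the lockd/nfsd conversions in this series; svc_recv() in 6.8 blocks for and dispatches one request):

	static int example_svc_thread(void *data)
	{
		struct svc_rqst *rqstp = data;

		while (!svc_thread_should_stop(rqstp))
			svc_recv(rqstp);

		/* Wakes the controller waiting on SP_VICTIM_REMAINS. */
		svc_exit_thread(rqstp);
		return 0;
	}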
+ */ +struct svc_rdma_chunk_ctxt { + struct rpc_rdma_cid cc_cid; + struct ib_cqe cc_cqe; + struct list_head cc_rwctxts; + ktime_t cc_posttime; + int cc_sqecount; +}; + struct svc_rdma_recv_ctxt { struct llist_node rc_node; struct list_head rc_list; @@ -136,22 +182,33 @@ struct svc_rdma_recv_ctxt { void *rc_recv_buf; struct xdr_stream rc_stream; u32 rc_byte_len; - unsigned int rc_page_count; u32 rc_inv_rkey; __be32 rc_msgtype; + /* State for pulling a Read chunk */ + unsigned int rc_pageoff; + unsigned int rc_curpage; + unsigned int rc_readbytes; + struct xdr_buf rc_saved_arg; + struct svc_rdma_chunk_ctxt rc_cc; + struct svc_rdma_pcl rc_call_pcl; struct svc_rdma_pcl rc_read_pcl; struct svc_rdma_chunk *rc_cur_result_payload; struct svc_rdma_pcl rc_write_pcl; struct svc_rdma_pcl rc_reply_pcl; + + unsigned int rc_page_count; + struct page *rc_pages[RPCSVC_MAXPAGES]; }; struct svc_rdma_send_ctxt { struct llist_node sc_node; struct rpc_rdma_cid sc_cid; + struct work_struct sc_work; + struct svcxprt_rdma *sc_rdma; struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; struct xdr_buf sc_hdrbuf; @@ -180,6 +237,11 @@ extern int svc_rdma_recvfrom(struct svc_rqst *); /* svc_rdma_rw.c */ extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma); +extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma, + struct svc_rdma_chunk_ctxt *cc); +extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma, + struct svc_rdma_chunk_ctxt *cc, + enum dma_data_direction dir); extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, const struct svc_rdma_chunk *chunk, const struct xdr_buf *xdr); @@ -200,7 +262,8 @@ extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, - const struct svc_rdma_recv_ctxt *rctxt, + const struct svc_rdma_pcl *write_pcl, + const struct svc_rdma_pcl *reply_pcl, const struct xdr_buf *xdr); extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index fa55d12dc765..8e20cd60e2e7 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -54,7 +54,7 @@ struct svc_xprt { const struct svc_xprt_ops *xpt_ops; struct kref xpt_ref; struct list_head xpt_list; - struct list_head xpt_ready; + struct lwq_node xpt_ready; unsigned long xpt_flags; struct svc_serv *xpt_server; /* service for transport */ diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 6f90203edbf8..61c455f1e1f5 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -131,8 +131,11 @@ enum svc_auth_status { * This call releases a domain. * * set_client() - * Givens a pending request (struct svc_rqst), finds and assigns + * Given a pending request (struct svc_rqst), finds and assigns * an appropriate 'auth_domain' as the client. + * + * pseudoflavor() + * Returns RPC_AUTH pseudoflavor in use by @rqstp. 
*/ struct auth_ops { char * name; @@ -143,11 +146,13 @@ struct auth_ops { int (*release)(struct svc_rqst *rqstp); void (*domain_release)(struct auth_domain *dom); enum svc_auth_status (*set_client)(struct svc_rqst *rqstp); + rpc_authflavor_t (*pseudoflavor)(struct svc_rqst *rqstp); }; struct svc_xprt; extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp); +extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp); extern int svc_authorise(struct svc_rqst *rqstp); extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp); extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 5b4fb3c791bc..2f8dc47f1eb0 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -779,7 +779,9 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr, if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) return -EBADMSG; - p = xdr_inline_decode(xdr, size_mul(len, sizeof(*p))); + if (U32_MAX >= SIZE_MAX / sizeof(*p) && len > SIZE_MAX / sizeof(*p)) + return -EBADMSG; + p = xdr_inline_decode(xdr, len * sizeof(*p)); if (unlikely(!p)) return -EBADMSG; if (array == NULL) diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 4ecc89301eb7..464f6a9492ab 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -30,17 +30,6 @@ #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) -/* - * This describes a timeout strategy - */ -struct rpc_timeout { - unsigned long to_initval, /* initial timeout */ - to_maxval, /* max timeout */ - to_increment; /* if !exponential */ - unsigned int to_retries; /* max # of retries */ - unsigned char to_exponential; -}; - enum rpc_display_format_t { RPC_DISPLAY_ADDR = 0, RPC_DISPLAY_PORT, @@ -57,6 +46,7 @@ struct xprt_class; struct seq_file; struct svc_serv; struct net; +#include <linux/lwq.h> /* * This describes a complete RPC request @@ -121,7 +111,7 @@ struct rpc_rqst { int rq_ntrans; #if defined(CONFIG_SUNRPC_BACKCHANNEL) - struct list_head rq_bc_list; /* Callback service list */ + struct lwq_node rq_bc_list; /* Callback service list */ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ #endif /* CONFIG_SUNRPC_BACKCHANEL */ diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h deleted file mode 100644 index 8d3376775813..000000000000 --- a/include/linux/superhyway.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * include/linux/superhyway.h - * - * SuperHyway Bus definitions - * - * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
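The open-coded guard in xdr_stream_decode_uint32_array() above replaces a size_mul() saturation with a check the compiler can delete. Spelled out for sizeof(*p) == 4 (arithmetic only, not code from the patch):

	/*
	 * 64-bit: SIZE_MAX / 4 == 0x3fffffffffffffff, which exceeds U32_MAX,
	 * so "U32_MAX >= SIZE_MAX / 4" is false at compile time and the
	 * branch vanishes; a u32 len can never overflow len * 4 in a size_t.
	 *
	 * 32-bit: SIZE_MAX / 4 == 0x3fffffff, which is below U32_MAX, so the
	 * branch survives and any len above 0x3fffffff is rejected with
	 * -EBADMSG before the multiplication can wrap.
	 */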
- */ -#ifndef __LINUX_SUPERHYWAY_H -#define __LINUX_SUPERHYWAY_H - -#include <linux/device.h> - -/* - * SuperHyway IDs - */ -#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183 - -struct superhyway_vcr_info { - u8 perr_flags; /* P-port Error flags */ - u8 merr_flags; /* Module Error flags */ - u16 mod_vers; /* Module Version */ - u16 mod_id; /* Module ID */ - u8 bot_mb; /* Bottom Memory block */ - u8 top_mb; /* Top Memory block */ -}; - -struct superhyway_ops { - int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr); - int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr); -}; - -struct superhyway_bus { - struct superhyway_ops *ops; -}; - -extern struct superhyway_bus superhyway_channels[]; - -struct superhyway_device_id { - unsigned int id; - unsigned long driver_data; -}; - -struct superhyway_device; -extern struct bus_type superhyway_bus_type; - -struct superhyway_driver { - char *name; - - const struct superhyway_device_id *id_table; - struct device_driver drv; - - int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id); - void (*remove)(struct superhyway_device *dev); -}; - -#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv) - -struct superhyway_device { - char name[32]; - - struct device dev; - - struct superhyway_device_id id; - struct superhyway_driver *drv; - struct superhyway_bus *bus; - - int num_resources; - struct resource *resource; - struct superhyway_vcr_info vcr; -}; - -#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev) - -#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev) -#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) - -static inline int -superhyway_read_vcr(struct superhyway_device *dev, unsigned long base, - struct superhyway_vcr_info *vcr) -{ - return dev->bus->ops->read_vcr(base, vcr); -} - -static inline int -superhyway_write_vcr(struct superhyway_device *dev, unsigned long base, - struct superhyway_vcr_info vcr) -{ - return dev->bus->ops->write_vcr(base, vcr); -} - -extern int superhyway_scan_bus(struct superhyway_bus *); - -/* drivers/sh/superhyway/superhyway.c */ -int superhyway_register_driver(struct superhyway_driver *); -void superhyway_unregister_driver(struct superhyway_driver *); -int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *); -int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices); - -/* drivers/sh/superhyway/superhyway-sysfs.c */ -extern const struct attribute_group *superhyway_dev_groups[]; - -#endif /* __LINUX_SUPERHYWAY_H */ - diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h index cb7980805920..5b67f0f47d80 100644 --- a/include/linux/surface_aggregator/controller.h +++ b/include/linux/surface_aggregator/controller.h @@ -44,7 +44,7 @@ struct ssam_event { u8 command_id; u8 instance_id; u16 length; - u8 data[]; + u8 data[] __counted_by(length); }; /** diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h index 42b249b4c24b..8cd8c38cf3f3 100644 --- a/include/linux/surface_aggregator/device.h +++ b/include/linux/surface_aggregator/device.h @@ -193,7 +193,6 @@ struct ssam_device_driver { #ifdef CONFIG_SURFACE_AGGREGATOR_BUS -extern struct bus_type ssam_bus_type; extern const struct device_type ssam_device_type; /** diff --git a/include/linux/surface_aggregator/serial_hub.h 
b/include/linux/surface_aggregator/serial_hub.h index 5c4ae1a26183..d8dbef6b7fc2 100644 --- a/include/linux/surface_aggregator/serial_hub.h +++ b/include/linux/surface_aggregator/serial_hub.h @@ -12,7 +12,7 @@ #ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H #define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H -#include <linux/crc-ccitt.h> +#include <linux/crc-itu-t.h> #include <linux/kref.h> #include <linux/ktime.h> #include <linux/list.h> @@ -188,7 +188,7 @@ static_assert(sizeof(struct ssh_command) == 8); */ static inline u16 ssh_crc(const u8 *buf, size_t len) { - return crc_ccitt_false(0xffff, buf, len); + return crc_itu_t(0xffff, buf, len); } /* diff --git a/include/linux/swap.h b/include/linux/swap.h index 493487ed7c38..4db00ddad261 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -298,6 +298,7 @@ struct swap_info_struct { unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ + struct bdev_handle *bdev_handle;/* open handle of the bdev */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ @@ -396,9 +397,6 @@ void folio_deactivate(struct folio *folio); void folio_mark_lazyfree(struct folio *folio); extern void swap_setup(void); -extern void lru_cache_add_inactive_or_unevictable(struct page *page, - struct vm_area_struct *vma); - /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, @@ -489,13 +487,12 @@ extern sector_t swapdev_block(int, pgoff_t); extern int __swap_count(swp_entry_t entry); extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); -extern struct swap_info_struct *page_swap_info(struct page *); -extern struct swap_info_struct *swp_swap_info(swp_entry_t entry); +struct swap_info_struct *swp_swap_info(swp_entry_t entry); struct backing_dev_info; extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); extern void exit_swap_address_space(unsigned int type); extern struct swap_info_struct *get_swap_device(swp_entry_t entry); -sector_t swap_page_sector(struct page *page); +sector_t swap_folio_sector(struct folio *folio); static inline void put_swap_device(struct swap_info_struct *si) { diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index b4536626f8ff..ecde0312dd52 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -172,14 +172,23 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) if (!mem) return false; - if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) { - /* Pairs with smp_wmb() in swiotlb_find_slots() and - * swiotlb_dyn_alloc(), which modify the RCU lists. - */ - smp_rmb(); - return swiotlb_find_pool(dev, paddr); - } +#ifdef CONFIG_SWIOTLB_DYNAMIC + /* + * All SWIOTLB buffer addresses must have been returned by + * swiotlb_tbl_map_single() and passed to a device driver. + * If a SWIOTLB address is checked on another CPU, then it was + * presumably loaded by the device driver from an unspecified private + * data structure. Make sure that this load is ordered before reading + * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool(). + * + * This barrier pairs with smp_mb() in swiotlb_find_slots(). 
+ */ + smp_rmb(); + return READ_ONCE(dev->dma_uses_io_tlb) && + swiotlb_find_pool(dev, paddr); +#else return paddr >= mem->defpool.start && paddr < mem->defpool.end; +#endif } static inline bool is_swiotlb_force_bounce(struct device *dev) diff --git a/include/linux/syscall_user_dispatch.h b/include/linux/syscall_user_dispatch.h index 641ca8880995..3858a6ffdd5c 100644 --- a/include/linux/syscall_user_dispatch.h +++ b/include/linux/syscall_user_dispatch.h @@ -6,16 +6,10 @@ #define _SYSCALL_USER_DISPATCH_H #include <linux/thread_info.h> +#include <linux/syscall_user_dispatch_types.h> #ifdef CONFIG_GENERIC_ENTRY -struct syscall_user_dispatch { - char __user *selector; - unsigned long offset; - unsigned long len; - bool on_dispatch; -}; - int set_syscall_user_dispatch(unsigned long mode, unsigned long offset, unsigned long len, char __user *selector); @@ -29,7 +23,6 @@ int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long siz void __user *data); #else -struct syscall_user_dispatch {}; static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset, unsigned long len, char __user *selector) diff --git a/include/linux/syscall_user_dispatch_types.h b/include/linux/syscall_user_dispatch_types.h new file mode 100644 index 000000000000..3be36b06c7d7 --- /dev/null +++ b/include/linux/syscall_user_dispatch_types.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SYSCALL_USER_DISPATCH_TYPES_H +#define _SYSCALL_USER_DISPATCH_TYPES_H + +#include <linux/types.h> + +#ifdef CONFIG_GENERIC_ENTRY + +struct syscall_user_dispatch { + char __user *selector; + unsigned long offset; + unsigned long len; + bool on_dispatch; +}; + +#else + +struct syscall_user_dispatch {}; + +#endif + +#endif /* _SYSCALL_USER_DISPATCH_TYPES_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 22bc6bc147f8..77eb9b0e7685 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -71,9 +71,12 @@ struct clone_args; struct open_how; struct mount_attr; struct landlock_ruleset_attr; +struct lsm_ctx; enum landlock_rule_type; struct cachestat_range; struct cachestat; +struct statmount; +struct mnt_id_req; #include <linux/types.h> #include <linux/aio_abi.h> @@ -125,6 +128,7 @@ struct cachestat; #define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL)) #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a #define __SC_CAST(t, a) (__force t) a +#define __SC_TYPE(t, a) t #define __SC_ARGS(t, a) a #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long)) @@ -355,7 +359,6 @@ asmlinkage long sys_lremovexattr(const char __user *path, const char __user *name); asmlinkage long sys_fremovexattr(int fd, const char __user *name); asmlinkage long sys_getcwd(char __user *buf, unsigned long size); -asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len); asmlinkage long sys_eventfd2(unsigned int count, int flags); asmlinkage long sys_epoll_create1(int flags); asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, @@ -408,6 +411,12 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz, asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf); asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, struct statfs64 __user *buf); +asmlinkage long sys_statmount(const struct mnt_id_req __user *req, + struct statmount __user *buf, size_t bufsize, + unsigned int flags); +asmlinkage long sys_listmount(const struct mnt_id_req __user *req, + u64 
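The barrier pairing in is_swiotlb_buffer() above, condensed into a two-CPU diagram (a simplification of the swiotlb_find_slots() side described in the comment, not the literal code):

	/*
	 * CPU A: swiotlb_find_slots()         CPU B: is_swiotlb_buffer()
	 *   install new pool in mem->pools      addr = <driver-private load>
	 *   WRITE_ONCE(dev->dma_uses_io_tlb,    smp_rmb();
	 *              true);                   if (READ_ONCE(dev->dma_uses_io_tlb))
	 *   smp_mb();                                   search mem->pools for addr
	 *   hand tlb_addr to the driver
	 */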
__user *mnt_ids, size_t nr_mnt_ids, + unsigned int flags); asmlinkage long sys_truncate(const char __user *path, long length); asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); #if BITS_PER_LONG == 32 @@ -549,6 +558,16 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, asmlinkage long sys_futex_waitv(struct futex_waitv *waiters, unsigned int nr_futexes, unsigned int flags, struct __kernel_timespec __user *timeout, clockid_t clockid); + +asmlinkage long sys_futex_wake(void __user *uaddr, unsigned long mask, int nr, unsigned int flags); + +asmlinkage long sys_futex_wait(void __user *uaddr, unsigned long val, unsigned long mask, + unsigned int flags, struct __kernel_timespec __user *timespec, + clockid_t clockid); + +asmlinkage long sys_futex_requeue(struct futex_waitv __user *waiters, + unsigned int flags, int nr_wake, int nr_requeue); + asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp, struct __kernel_timespec __user *rmtp); asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp, @@ -940,6 +959,11 @@ asmlinkage long sys_cachestat(unsigned int fd, struct cachestat_range __user *cstat_range, struct cachestat __user *cstat, unsigned int flags); asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags); +asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx, + size_t *size, __u32 flags); +asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx, + size_t size, __u32 flags); +asmlinkage long sys_lsm_list_modules(u64 *ids, size_t *size, u32 flags); /* * Architecture-specific system calls diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 09d7429d67c0..ee7d33b89e9e 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -210,11 +210,6 @@ struct ctl_table_root { int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); }; -/* struct ctl_path describes where in the hierarchy a table is added */ -struct ctl_path { - const char *procname; -}; - #define register_sysctl(path, table) \ register_sysctl_sz(path, table, ARRAY_SIZE(table)) @@ -242,6 +237,7 @@ extern void __register_sysctl_init(const char *path, struct ctl_table *table, extern struct ctl_table_header *register_sysctl_mount_point(const char *path); void do_sysctl_args(void); +bool sysctl_is_alias(char *param); int do_proc_douintvec(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos, int (*conv)(unsigned long *lvalp, @@ -254,8 +250,6 @@ extern int unaligned_enabled; extern int unaligned_dump_stack; extern int no_unaligned_warning; -#define SYSCTL_PERM_EMPTY_DIR (1 << 0) - #else /* CONFIG_SYSCTL */ static inline void register_sysctl_init(const char *path, struct ctl_table *table) @@ -287,6 +281,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p, static inline void do_sysctl_args(void) { } + +static inline bool sysctl_is_alias(char *param) +{ + return false; +} #endif /* CONFIG_SYSCTL */ int sysctl_max_threads(struct ctl_table *table, int write, void *buffer, diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fd3fe5c8c17f..b717a70219f6 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -181,6 +181,8 @@ struct bin_attribute { char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *, + loff_t, int); int (*mmap)(struct file *, struct 
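statmount() and listmount(), declared a few hunks up, land without libc wrappers; a userspace sketch via syscall(2) (struct layout from the 6.8 uapi <linux/mount.h>; SYS_listmount is assumed to be visible through <sys/syscall.h> on a current system):

	#include <stdint.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/mount.h>

	/* List the children of one mount; returns the count written to ids[]. */
	static long list_children(uint64_t mnt_id, uint64_t *ids, size_t n)
	{
		struct mnt_id_req req = {
			.size	= MNT_ID_REQ_SIZE_VER0,
			.mnt_id	= mnt_id,	/* or LSMT_ROOT for the root mount */
		};

		return syscall(SYS_listmount, &req, ids, n, 0);
	}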
kobject *, struct bin_attribute *attr, struct vm_area_struct *vma); }; diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h index b0d36a9934cc..5cf6f6f82aa7 100644 --- a/include/linux/tca6416_keypad.h +++ b/include/linux/tca6416_keypad.h @@ -25,7 +25,6 @@ struct tca6416_keys_platform_data { unsigned int rep:1; /* enable input subsystem auto repeat */ uint16_t pinmask; uint16_t invert; - int irq_is_gpio; int use_polling; /* use polling if Interrupt is not connected*/ }; #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 3c5efeeb024f..89b290d8c8dc 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -152,6 +152,7 @@ struct tcp_request_sock { u64 snt_synack; /* first SYNACK sent time */ bool tfo_listener; bool is_mptcp; + bool req_usec_ts; #if IS_ENABLED(CONFIG_MPTCP) bool drop_req; #endif @@ -165,6 +166,11 @@ struct tcp_request_sock { * after data-in-SYN. */ u8 syn_tos; +#ifdef CONFIG_TCP_AO + u8 ao_keyid; + u8 ao_rcv_next; + bool used_tcp_ao; +#endif }; static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) @@ -172,26 +178,133 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) return (struct tcp_request_sock *)req; } +static inline bool tcp_rsk_used_ao(const struct request_sock *req) +{ +#ifndef CONFIG_TCP_AO + return false; +#else + return tcp_rsk(req)->used_tcp_ao; +#endif +} + #define TCP_RMEM_TO_WIN_SCALE 8 struct tcp_sock { + /* Cacheline organization can be found documented in + * Documentation/networking/net_cachelines/tcp_sock.rst. + * Please update the document when adding new fields. + */ + /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; - u16 tcp_header_len; /* Bytes of tcp header to send */ + + /* TX read-mostly hotpath cache lines */ + __cacheline_group_begin(tcp_sock_read_tx); + /* timestamp of last sent data packet (for restart window) */ + u32 max_window; /* Maximal window ever seen from peer */ + u32 rcv_ssthresh; /* Current window clamp */ + u32 reordering; /* Packet reordering metric. */ + u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */ u16 gso_segs; /* Max number of segs per GSO packet */ + /* from STCP, retrans queue hinting */ + struct sk_buff *lost_skb_hint; + struct sk_buff *retransmit_skb_hint; + __cacheline_group_end(tcp_sock_read_tx); + + /* TXRX read-mostly hotpath cache lines */ + __cacheline_group_begin(tcp_sock_read_txrx); + u32 tsoffset; /* timestamp offset */ + u32 snd_wnd; /* The window we expect to receive */ + u32 mss_cache; /* Cached effective mss, not including SACKS */ + u32 snd_cwnd; /* Sending congestion window */ + u32 prr_out; /* Total number of pkts sent during Recovery. */ + u32 lost_out; /* Lost packets */ + u32 sacked_out; /* SACK'd packets */ + u16 tcp_header_len; /* Bytes of tcp header to send */ + u8 chrono_type : 2, /* current chronograph type */ + repair : 1, + is_sack_reneg:1, /* in recovery from loss with SACK reneg? */ + is_cwnd_limited:1;/* forward progress limited by snd_cwnd? 
*/ + __cacheline_group_end(tcp_sock_read_txrx); + + /* RX read-mostly hotpath cache lines */ + __cacheline_group_begin(tcp_sock_read_rx); + u32 copied_seq; /* Head of yet unread data */ + u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ + u32 snd_wl1; /* Sequence for window update */ + u32 tlp_high_seq; /* snd_nxt at the time of TLP */ + u32 rttvar_us; /* smoothed mdev_max */ + u32 retrans_out; /* Retransmitted packets out */ + u16 advmss; /* Advertised MSS */ + u16 urg_data; /* Saved octet of OOB data and control flags */ + u32 lost; /* Total data packets lost incl. rexmits */ + struct minmax rtt_min; + /* OOO segments go in this rbtree. Socket lock must be held. */ + struct rb_root out_of_order_queue; + u32 snd_ssthresh; /* Slow start size threshold */ + __cacheline_group_end(tcp_sock_read_rx); + /* TX read-write hotpath cache lines */ + __cacheline_group_begin(tcp_sock_write_tx) ____cacheline_aligned; + u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut + * The total number of segments sent. + */ + u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut + * total number of data segments sent. + */ + u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut + * total number of data bytes sent. + */ + u32 snd_sml; /* Last byte of the most recently transmitted small packet */ + u32 chrono_start; /* Start time in jiffies of a TCP chrono */ + u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ + u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ + u32 pushed_seq; /* Last pushed seq, required to talk to windows */ + u32 lsndtime; + u32 mdev_us; /* medium deviation */ + u64 tcp_wstamp_ns; /* departure time for next sent data packet */ + u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ + u64 tcp_mstamp; /* most recent packet received/sent */ + u32 rtt_seq; /* sequence number to update rttvar */ + struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */ + struct sk_buff *highest_sack; /* skb just after the highest + * skb with SACKed bit set + * (validity guaranteed only if + * sacked_out > 0) + */ + u8 ecn_flags; /* ECN status bits. */ + __cacheline_group_end(tcp_sock_write_tx); + + /* TXRX read-write hotpath cache lines */ + __cacheline_group_begin(tcp_sock_write_txrx); /* * Header prediction flags * 0x5?10 << 16 + snd_wnd in net byte order */ __be32 pred_flags; - + u32 rcv_nxt; /* What we want to receive next */ + u32 snd_nxt; /* Next sequence we send */ + u32 snd_una; /* First byte we want an ack for */ + u32 window_clamp; /* Maximal window to advertise */ + u32 srtt_us; /* smoothed round trip time << 3 in usecs */ + u32 packets_out; /* Packets which are "in flight" */ + u32 snd_up; /* Urgent pointer */ + u32 delivered; /* Total data packets delivered incl. rexmits */ + u32 delivered_ce; /* Like the above but only ECE marked packets */ + u32 app_limited; /* limited until "delivered" reaches this val */ + u32 rcv_wnd; /* Current receiver window */ /* - * RFC793 variables by their proper names. This means you can - * read the code and the spec side by side (and laugh ...) - * See RFC793 and RFC1122. The RFC writes these in capitals. + * Options received (usually on last packet, some only on SYN packets). */ - u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived + struct tcp_options_received rx_opt; + u8 nonagle : 4,/* Disable Nagle algorithm? */ + rate_app_limited:1; /* rate_{delivered,interval_us} limited? 
*/ + __cacheline_group_end(tcp_sock_write_txrx); + + /* RX read-write hotpath cache lines */ + __cacheline_group_begin(tcp_sock_write_rx); + u64 bytes_received; + /* RFC4898 tcpEStatsAppHCThruOctetsReceived * sum(delta(rcv_nxt)), or how many bytes * were acked. */ @@ -201,45 +314,44 @@ struct tcp_sock { u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn * total number of data segments in. */ - u32 rcv_nxt; /* What we want to receive next */ - u32 copied_seq; /* Head of yet unread data */ u32 rcv_wup; /* rcv_nxt on last window update sent */ - u32 snd_nxt; /* Next sequence we send */ - u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut - * The total number of segments sent. - */ - u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut - * total number of data segments sent. - */ - u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut - * total number of data bytes sent. - */ + u32 max_packets_out; /* max packets_out in last window */ + u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */ + u32 rate_delivered; /* saved rate sample: packets delivered */ + u32 rate_interval_us; /* saved rate sample: time elapsed */ + u32 rcv_rtt_last_tsecr; + u64 first_tx_mstamp; /* start of window send phase */ + u64 delivered_mstamp; /* time we reached "delivered" */ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked * sum(delta(snd_una)), or how many bytes * were acked. */ + struct { + u32 rtt_us; + u32 seq; + u64 time; + } rcv_rtt_est; +/* Receiver queue space */ + struct { + u32 space; + u32 seq; + u64 time; + } rcvq_space; + __cacheline_group_end(tcp_sock_write_rx); + /* End of Hot Path */ + +/* + * RFC793 variables by their proper names. This means you can + * read the code and the spec side by side (and laugh ...) + * See RFC793 and RFC1122. The RFC writes these in capitals. + */ u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups * total number of DSACK blocks received */ - u32 snd_una; /* First byte we want an ack for */ - u32 snd_sml; /* Last byte of the most recently transmitted small packet */ - u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ - u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ u32 compressed_ack_rcv_nxt; - - u32 tsoffset; /* timestamp offset */ - struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ - struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */ - u32 snd_wl1; /* Sequence for window update */ - u32 snd_wnd; /* The window we expect to receive */ - u32 max_window; /* Maximal window ever seen from peer */ - u32 mss_cache; /* Cached effective mss, not including SACKS */ - - u32 window_clamp; /* Maximal window to advertise */ - u32 rcv_ssthresh; /* Current window clamp */ u8 scaling_ratio; /* see tcp_win_from_space() */ /* Information of the most recently (s)acked skb */ struct tcp_rack { @@ -253,23 +365,16 @@ struct tcp_sock { dsack_seen:1, /* Whether DSACK seen after last adj */ advanced:1; /* mstamp advanced since last lost marking */ } rack; - u16 advmss; /* Advertised MSS */ u8 compressed_ack; u8 dup_ack_counter:2, tlp_retrans:1, /* TLP is a retransmission */ - unused:5; - u32 chrono_start; /* Start time in jiffies of a TCP chrono */ - u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ - u8 chrono_type:2, /* current chronograph type */ - rate_app_limited:1, /* rate_{delivered,interval_us} limited? 
*/ + tcp_usec_ts:1, /* TSval values in usec */ + unused:4; + u8 thin_lto : 1,/* Use linear timeouts for thin streams */ + recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */ fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */ - is_sack_reneg:1, /* in recovery from loss with SACK reneg? */ - fastopen_client_fail:2; /* reason why fastopen failed */ - u8 nonagle : 4,/* Disable Nagle algorithm? */ - thin_lto : 1,/* Use linear timeouts for thin streams */ - recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */ - repair : 1, + fastopen_client_fail:2, /* reason why fastopen failed */ frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */ u8 repair_queue; u8 save_syn:2, /* Save headers of SYN packet */ @@ -277,45 +382,19 @@ struct tcp_sock { syn_fastopen:1, /* SYN includes Fast Open option */ syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ syn_fastopen_ch:1, /* Active TFO re-enabling probe */ - syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ - is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ - u32 tlp_high_seq; /* snd_nxt at the time of TLP */ + syn_data_acked:1;/* data in SYN is acked by SYN-ACK */ u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ - u64 tcp_wstamp_ns; /* departure time for next sent data packet */ - u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ /* RTT measurement */ - u64 tcp_mstamp; /* most recent packet received/sent */ - u32 srtt_us; /* smoothed round trip time << 3 in usecs */ - u32 mdev_us; /* medium deviation */ u32 mdev_max_us; /* maximal mdev for the last rtt period */ - u32 rttvar_us; /* smoothed mdev_max */ - u32 rtt_seq; /* sequence number to update rttvar */ - struct minmax rtt_min; - u32 packets_out; /* Packets which are "in flight" */ - u32 retrans_out; /* Retransmitted packets out */ - u32 max_packets_out; /* max packets_out in last window */ - u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */ - - u16 urg_data; /* Saved octet of OOB data and control flags */ - u8 ecn_flags; /* ECN status bits. */ u8 keepalive_probes; /* num of allowed keep alive probes */ - u32 reordering; /* Packet reordering metric. */ u32 reord_seen; /* number of data packet reordering events */ - u32 snd_up; /* Urgent pointer */ - -/* - * Options received (usually on last packet, some only on SYN packets). - */ - struct tcp_options_received rx_opt; /* * Slow start and congestion control (see also Nagle, and Karn & Partridge) */ - u32 snd_ssthresh; /* Slow start size threshold */ - u32 snd_cwnd; /* Sending congestion window */ u32 snd_cwnd_cnt; /* Linear increase counter */ u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */ u32 snd_cwnd_used; @@ -323,32 +402,10 @@ struct tcp_sock { u32 prior_cwnd; /* cwnd right before starting loss recovery */ u32 prr_delivered; /* Number of newly delivered packets to * receiver in Recovery. */ - u32 prr_out; /* Total number of pkts sent during Recovery. */ - u32 delivered; /* Total data packets delivered incl. rexmits */ - u32 delivered_ce; /* Like the above but only ECE marked packets */ - u32 lost; /* Total data packets lost incl. 
rexmits */ - u32 app_limited; /* limited until "delivered" reaches this val */ - u64 first_tx_mstamp; /* start of window send phase */ - u64 delivered_mstamp; /* time we reached "delivered" */ - u32 rate_delivered; /* saved rate sample: packets delivered */ - u32 rate_interval_us; /* saved rate sample: time elapsed */ - - u32 rcv_wnd; /* Current receiver window */ - u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ - u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */ - u32 pushed_seq; /* Last pushed seq, required to talk to windows */ - u32 lost_out; /* Lost packets */ - u32 sacked_out; /* SACK'd packets */ struct hrtimer pacing_timer; struct hrtimer compressed_ack_timer; - /* from STCP, retrans queue hinting */ - struct sk_buff* lost_skb_hint; - struct sk_buff *retransmit_skb_hint; - - /* OOO segments go in this rbtree. Socket lock must be held. */ - struct rb_root out_of_order_queue; struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */ /* SACKs data, these 2 need to be together (see tcp_options_write) */ @@ -357,12 +414,6 @@ struct tcp_sock { struct tcp_sack_block recv_sack_cache[4]; - struct sk_buff *highest_sack; /* skb just after the highest - * skb with SACKed bit set - * (validity guaranteed only if - * sacked_out > 0) - */ - int lost_cnt_hint; u32 prior_ssthresh; /* ssthresh saved at recovery start */ @@ -377,6 +428,14 @@ struct tcp_sock { * Total data bytes retransmitted */ u32 total_retrans; /* Total retransmits for entire connection */ + u32 rto_stamp; /* Start time (ms) of last CA_Loss recovery */ + u16 total_rto; /* Total number of RTO timeouts, including + * SYN/SYN-ACK and recurring timeouts. + */ + u16 total_rto_recoveries; /* Total number of RTO recoveries, + * including any unfinished recovery. + */ + u32 total_rto_time; /* ms spent in (completed) RTO recoveries. */ u32 urg_seq; /* Seq of received urgent pointer */ unsigned int keepalive_time; /* time before keep alive takes place */ @@ -405,21 +464,6 @@ struct tcp_sock { u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */ -/* Receiver side RTT estimation */ - u32 rcv_rtt_last_tsecr; - struct { - u32 rtt_us; - u32 seq; - u64 time; - } rcv_rtt_est; - -/* Receiver queue space */ - struct { - u32 space; - u32 seq; - u64 time; - } rcvq_space; - /* TCP-specific MTU probe information. 
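The large tcp_sock reshuffle above groups fields by access pattern using the __cacheline_group_begin()/__cacheline_group_end() markers from <linux/cache.h>; the pattern in miniature (illustrative struct, not from the patch):

	struct example_sock {
		/* fields read together on the transmit fast path */
		__cacheline_group_begin(example_read_tx);
		u32	cwnd_hint;
		u32	mss_hint;
		__cacheline_group_end(example_read_tx);
	};

	/*
	 * The resulting layout is then pinned at build time with asserts
	 * such as (see net/ipv4/tcp.c in this release):
	 * CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx,
	 *				 max_window);
	 */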
*/ struct { u32 probe_seq_start; @@ -437,13 +481,18 @@ struct tcp_sock { bool syn_smc; /* SYN includes SMC */ #endif -#ifdef CONFIG_TCP_MD5SIG -/* TCP AF-Specific parts; only used by MD5 Signature support so far */ +#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) +/* TCP AF-Specific parts; only used by TCP-AO/MD5 Signature support so far */ const struct tcp_sock_af_ops *af_specific; +#ifdef CONFIG_TCP_MD5SIG /* TCP MD5 Signature Option information */ struct tcp_md5sig_info __rcu *md5sig_info; #endif +#ifdef CONFIG_TCP_AO + struct tcp_ao_info __rcu *ao_info; +#endif +#endif /* TCP fastopen related information */ struct tcp_fastopen_request *fastopen_req; @@ -463,15 +512,17 @@ enum tsq_enum { TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call * tcp_v{4|6}_mtu_reduced() */ + TCP_ACK_DEFERRED, /* TX pure ack is deferred */ }; enum tsq_flags { - TSQF_THROTTLED = (1UL << TSQ_THROTTLED), - TSQF_QUEUED = (1UL << TSQ_QUEUED), - TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED), - TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED), - TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED), - TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), + TSQF_THROTTLED = BIT(TSQ_THROTTLED), + TSQF_QUEUED = BIT(TSQ_QUEUED), + TCPF_TSQ_DEFERRED = BIT(TCP_TSQ_DEFERRED), + TCPF_WRITE_TIMER_DEFERRED = BIT(TCP_WRITE_TIMER_DEFERRED), + TCPF_DELACK_TIMER_DEFERRED = BIT(TCP_DELACK_TIMER_DEFERRED), + TCPF_MTU_REDUCED_DEFERRED = BIT(TCP_MTU_REDUCED_DEFERRED), + TCPF_ACK_DEFERRED = BIT(TCP_ACK_DEFERRED), }; #define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk) @@ -497,6 +548,9 @@ struct tcp_timewait_sock { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif +#ifdef CONFIG_TCP_AO + struct tcp_ao_info __rcu *ao_info; +#endif }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) @@ -566,4 +620,9 @@ void tcp_sock_set_quickack(struct sock *sk, int val); int tcp_sock_set_syncnt(struct sock *sk, int val); int tcp_sock_set_user_timeout(struct sock *sk, int val); +static inline bool dst_tcp_usec_ts(const struct dst_entry *dst) +{ + return dst_feature(dst, RTAX_FEATURE_TCP_USEC_TS); +} + #endif /* _LINUX_TCP_H */ diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 17eb1c5205d3..911ddf92dcee 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -84,6 +84,7 @@ struct tee_param { * @release: release this open file * @open_session: open a new session * @close_session: close a session + * @system_session: declare session as a system session * @invoke_func: invoke a trusted function * @cancel_req: request cancel of an ongoing invoke or open * @supp_recv: called for supplicant to get a command @@ -100,6 +101,7 @@ struct tee_driver_ops { struct tee_ioctl_open_session_arg *arg, struct tee_param *param); int (*close_session)(struct tee_context *ctx, u32 session); + int (*system_session)(struct tee_context *ctx, u32 session); int (*invoke_func)(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, struct tee_param *param); @@ -430,6 +432,20 @@ int tee_client_open_session(struct tee_context *ctx, int tee_client_close_session(struct tee_context *ctx, u32 session); /** + * tee_client_system_session() - Declare session as a system session + * @ctx: TEE Context + * @session: Session id + * + * This function requests TEE to provision an entry context ready to use for + * that session only. 
The provisioned entry context is used for command + * invocation and session closure, not for command cancelling requests. + * TEE releases the provisioned context upon session closure. + * + * Return < 0 on error else 0 if an entry context has been provisioned. + */ +int tee_client_system_session(struct tee_context *ctx, u32 session); + +/** * tee_client_invoke_func() - Invoke a function in a Trusted Application * @ctx: TEE Context * @arg: Invoke arguments, see description of diff --git a/include/linux/thermal.h b/include/linux/thermal.h index b449a46766f5..b7a3deb372fd 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -32,6 +32,7 @@ struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; +struct thermal_debugfs; struct thermal_attr; enum thermal_trend { @@ -51,6 +52,25 @@ enum thermal_notify_event { THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */ THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */ THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */ + THERMAL_TZ_BIND_CDEV, /* Cooling dev is bind to the thermal zone */ + THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbind from the thermal zone */ + THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */ +}; + +/** + * struct thermal_trip - representation of a point in temperature domain + * @temperature: temperature value in miliCelsius + * @hysteresis: relative hysteresis in miliCelsius + * @threshold: trip crossing notification threshold miliCelsius + * @type: trip point type + * @priv: pointer to driver data associated with this trip + */ +struct thermal_trip { + int temperature; + int hysteresis; + int threshold; + enum thermal_trip_type type; + void *priv; }; struct thermal_zone_device_ops { @@ -62,34 +82,16 @@ struct thermal_zone_device_ops { int (*set_trips) (struct thermal_zone_device *, int, int); int (*change_mode) (struct thermal_zone_device *, enum thermal_device_mode); - int (*get_trip_type) (struct thermal_zone_device *, int, - enum thermal_trip_type *); - int (*get_trip_temp) (struct thermal_zone_device *, int, int *); int (*set_trip_temp) (struct thermal_zone_device *, int, int); - int (*get_trip_hyst) (struct thermal_zone_device *, int, int *); int (*set_trip_hyst) (struct thermal_zone_device *, int, int); int (*get_crit_temp) (struct thermal_zone_device *, int *); int (*set_emul_temp) (struct thermal_zone_device *, int); - int (*get_trend) (struct thermal_zone_device *, int, - enum thermal_trend *); + int (*get_trend) (struct thermal_zone_device *, + const struct thermal_trip *, enum thermal_trend *); void (*hot)(struct thermal_zone_device *); void (*critical)(struct thermal_zone_device *); }; -/** - * struct thermal_trip - representation of a point in temperature domain - * @temperature: temperature value in miliCelsius - * @hysteresis: relative hysteresis in miliCelsius - * @type: trip point type - * @priv: pointer to driver data associated with this trip - */ -struct thermal_trip { - int temperature; - int hysteresis; - enum thermal_trip_type type; - void *priv; -}; - struct thermal_cooling_device_ops { int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); @@ -101,7 +103,7 @@ struct thermal_cooling_device_ops { struct thermal_cooling_device { int id; - char *type; + const char *type; unsigned long max_state; struct device device; struct device_node *np; @@ -112,6 +114,9 @@ struct thermal_cooling_device { struct mutex lock; 
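A client driver opting in to the system-session behaviour described above would call it straight after opening the session (sketch; argument setup elided, and the SCMI OP-TEE transport is the intended in-tree user per this series):

	struct tee_ioctl_open_session_arg arg = { };
	int rc;

	/* arg.uuid, arg.clnt_login, ... filled in by the caller */
	rc = tee_client_open_session(ctx, &arg, NULL);
	if (rc || arg.ret)
		return -ENODEV;

	/* Ask the TEE to provision a dedicated entry context for this session. */
	rc = tee_client_system_session(ctx, arg.session);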
/* protect thermal_instances list */ struct list_head thermal_instances; struct list_head node; +#ifdef CONFIG_THERMAL_DEBUGFS + struct thermal_debugfs *debugfs; +#endif }; /** @@ -119,6 +124,7 @@ struct thermal_cooling_device { * @id: unique id number for each thermal zone * @type: the thermal zone device type * @device: &struct device for this thermal zone + * @removal: removal completion * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature * @trip_type_attrs: attributes for trip points for sysfs: trip type * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis @@ -126,7 +132,6 @@ struct thermal_cooling_device { * @devdata: private pointer for device private data * @trips: an array of struct thermal_trip * @num_trips: number of trip points the thermal zone supports - * @trips_disabled; bitmap for disabled trips * @passive_delay_jiffies: number of jiffies to wait between polls when * performing passive cooling. * @polling_delay_jiffies: number of jiffies to wait between polls when @@ -154,11 +159,13 @@ struct thermal_cooling_device { * @node: node in thermal_tz_list (in thermal_core.c) * @poll_queue: delayed work for polling * @notify_event: Last notification event + * @suspended: thermal zone suspend indicator */ struct thermal_zone_device { int id; char type[THERMAL_NAME_LENGTH]; struct device device; + struct completion removal; struct attribute_group trips_attribute_group; struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; @@ -167,7 +174,6 @@ struct thermal_zone_device { void *devdata; struct thermal_trip *trips; int num_trips; - unsigned long trips_disabled; /* bitmap for disabled trips */ unsigned long passive_delay_jiffies; unsigned long polling_delay_jiffies; int temperature; @@ -187,6 +193,10 @@ struct thermal_zone_device { struct list_head node; struct delayed_work poll_queue; enum thermal_notify_event notify_event; +#ifdef CONFIG_THERMAL_DEBUGFS + struct thermal_debugfs *debugfs; +#endif + bool suspended; }; /** @@ -199,13 +209,18 @@ struct thermal_zone_device { * thermal zone. * @throttle: callback called for every trip point even if temperature is * below the trip point temperature + * @update_tz: callback called when thermal zone internals have changed, e.g. 
+ * thermal cooling instance was added/removed * @governor_list: node in thermal_governor_list (in thermal_core.c) */ struct thermal_governor { char name[THERMAL_NAME_LENGTH]; int (*bind_to_tz)(struct thermal_zone_device *tz); void (*unbind_from_tz)(struct thermal_zone_device *tz); - int (*throttle)(struct thermal_zone_device *tz, int trip); + int (*throttle)(struct thermal_zone_device *tz, + const struct thermal_trip *trip); + void (*update_tz)(struct thermal_zone_device *tz, + enum thermal_notify_event reason); struct list_head governor_list; }; @@ -285,53 +300,57 @@ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, struct thermal_trip *trip); int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, struct thermal_trip *trip); - -int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id, - const struct thermal_trip *trip); - int for_each_thermal_trip(struct thermal_zone_device *tz, int (*cb)(struct thermal_trip *, void *), void *data); +int thermal_zone_for_each_trip(struct thermal_zone_device *tz, + int (*cb)(struct thermal_trip *, void *), + void *data); int thermal_zone_get_num_trips(struct thermal_zone_device *tz); +void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, + struct thermal_trip *trip, int temp); int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp); -#ifdef CONFIG_THERMAL_ACPI -int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp); -int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp); -int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp); -int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp); -#endif - #ifdef CONFIG_THERMAL -struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, - void *, struct thermal_zone_device_ops *, - const struct thermal_zone_params *, int, int); - -void thermal_zone_device_unregister(struct thermal_zone_device *); - -struct thermal_zone_device * -thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int, - void *, struct thermal_zone_device_ops *, - const struct thermal_zone_params *, int, int); +struct thermal_zone_device *thermal_zone_device_register_with_trips( + const char *type, + struct thermal_trip *trips, + int num_trips, int mask, + void *devdata, + struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp, + int passive_delay, int polling_delay); + +struct thermal_zone_device *thermal_tripless_zone_device_register( + const char *type, + void *devdata, + struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp); + +void thermal_zone_device_unregister(struct thermal_zone_device *tz); void *thermal_zone_device_priv(struct thermal_zone_device *tzd); const char *thermal_zone_device_type(struct thermal_zone_device *tzd); int thermal_zone_device_id(struct thermal_zone_device *tzd); struct device *thermal_zone_device(struct thermal_zone_device *tzd); +int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + unsigned long upper, unsigned long lower, + unsigned int weight); int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *, unsigned long, unsigned long, unsigned int); +int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev); int 
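thermal_zone_for_each_trip(), added above, replaces open-coded index loops over tz->trips with a callback walk. Usage sketch (hypothetical helper counting active trips):

	static int count_active(struct thermal_trip *trip, void *data)
	{
		int *n = data;

		if (trip->type == THERMAL_TRIP_ACTIVE)
			(*n)++;
		return 0;
	}

	static int active_trip_count(struct thermal_zone_device *tz)
	{
		int n = 0;

		thermal_zone_for_each_trip(tz, count_active, &n);
		return n;
	}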
thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); -void thermal_zone_device_exec(struct thermal_zone_device *tz, - void (*cb)(struct thermal_zone_device *, - unsigned long), - unsigned long data); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); @@ -354,15 +373,26 @@ int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); void thermal_zone_device_critical(struct thermal_zone_device *tz); #else -static inline struct thermal_zone_device *thermal_zone_device_register( - const char *type, int trips, int mask, void *devdata, - struct thermal_zone_device_ops *ops, - const struct thermal_zone_params *tzp, - int passive_delay, int polling_delay) +static inline struct thermal_zone_device *thermal_zone_device_register_with_trips( + const char *type, + struct thermal_trip *trips, + int num_trips, int mask, + void *devdata, + struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp, + int passive_delay, int polling_delay) +{ return ERR_PTR(-ENODEV); } + +static inline struct thermal_zone_device *thermal_tripless_zone_device_register( + const char *type, + void *devdata, + struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp) { return ERR_PTR(-ENODEV); } -static inline void thermal_zone_device_unregister( - struct thermal_zone_device *tz) + +static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz) { } + static inline struct thermal_cooling_device * thermal_cooling_device_register(const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 02333f47c994..2c835e5c41f6 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -86,7 +86,7 @@ struct tb { unsigned long privdata[]; }; -extern struct bus_type tb_bus_type; +extern const struct bus_type tb_bus_type; extern struct device_type tb_service_type; extern struct device_type tb_xdomain_type; @@ -175,7 +175,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); * enum tb_link_width - Thunderbolt/USB4 link width * @TB_LINK_WIDTH_SINGLE: Single lane link * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link - * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 trasmitters + * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers */ enum tb_link_width { diff --git a/include/linux/tick.h b/include/linux/tick.h index 9459fef5b857..716d17f31c45 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -140,14 +140,6 @@ extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); - -static inline void tick_nohz_idle_stop_tick_protected(void) -{ - local_irq_disable(); - tick_nohz_idle_stop_tick(); - local_irq_enable(); -} - #else /* !CONFIG_NO_HZ_COMMON */ #define tick_nohz_enabled (0) static inline int tick_nohz_tick_stopped(void) { return 0; } @@ -170,8 +162,6 @@ static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) } static inline u64 
get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } - -static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ #ifdef CONFIG_NO_HZ_FULL diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h index 03d9c5ac01d1..876e31b4461d 100644 --- a/include/linux/time_namespace.h +++ b/include/linux/time_namespace.h @@ -7,10 +7,13 @@ #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/err.h> +#include <linux/time64.h> struct user_namespace; extern struct user_namespace init_user_ns; +struct vm_area_struct; + struct timens_offsets { struct timespec64 monotonic; struct timespec64 boottime; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index fe1e467ba046..7c43e98cf211 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -4,6 +4,7 @@ #include <linux/errno.h> #include <linux/clocksource_ids.h> +#include <linux/ktime.h> /* Included from linux/ktime.h */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 9162f275819a..f18a2f1eb79e 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -7,21 +7,7 @@ #include <linux/stddef.h> #include <linux/debugobjects.h> #include <linux/stringify.h> - -struct timer_list { - /* - * All fields that change during normal runtime grouped to the - * same cacheline - */ - struct hlist_node entry; - unsigned long expires; - void (*function)(struct timer_list *); - u32 flags; - -#ifdef CONFIG_LOCKDEP - struct lockdep_map lockdep_map; -#endif -}; +#include <linux/timer_types.h> #ifdef CONFIG_LOCKDEP /* @@ -77,8 +63,7 @@ struct timer_list { .entry = { .next = TIMER_ENTRY_STATIC }, \ .function = (_function), \ .flags = (_flags), \ - __TIMER_LOCKDEP_MAP_INITIALIZER( \ - __FILE__ ":" __stringify(__LINE__)) \ + __TIMER_LOCKDEP_MAP_INITIALIZER(FILE_LINE) \ } #define DEFINE_TIMER(_name, _function) \ diff --git a/include/linux/timer_types.h b/include/linux/timer_types.h new file mode 100644 index 000000000000..fae5a388f914 --- /dev/null +++ b/include/linux/timer_types.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TIMER_TYPES_H +#define _LINUX_TIMER_TYPES_H + +#include <linux/lockdep_types.h> +#include <linux/types.h> + +struct timer_list { + /* + * All fields that change during normal runtime grouped to the + * same cacheline + */ + struct hlist_node entry; + unsigned long expires; + void (*function)(struct timer_list *); + u32 flags; + +#ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +#endif +}; + +#endif /* _LINUX_TIMER_TYPES_H */ diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index adc80e29168e..62973f7d4610 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -3,18 +3,7 @@ #define _LINUX_TIMERQUEUE_H #include <linux/rbtree.h> -#include <linux/ktime.h> - - -struct timerqueue_node { - struct rb_node node; - ktime_t expires; -}; - -struct timerqueue_head { - struct rb_root_cached rb_root; -}; - +#include <linux/timerqueue_types.h> extern bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node); diff --git a/include/linux/timerqueue_types.h b/include/linux/timerqueue_types.h new file mode 100644 index 000000000000..dc298d0923e3 --- /dev/null +++ b/include/linux/timerqueue_types.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TIMERQUEUE_TYPES_H +#define _LINUX_TIMERQUEUE_TYPES_H + +#include <linux/rbtree_types.h> +#include 
<linux/types.h> + +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +#endif /* _LINUX_TIMERQUEUE_TYPES_H */ diff --git a/include/linux/tnum.h b/include/linux/tnum.h index 1c3948a1d6ad..3c13240077b8 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -106,6 +106,10 @@ int tnum_sbin(char *str, size_t size, struct tnum a); struct tnum tnum_subreg(struct tnum a); /* Returns the tnum with the lower 32-bit subreg cleared */ struct tnum tnum_clear_subreg(struct tnum a); +/* Returns the tnum with the lower 32-bit subreg in *reg* set to the lower + * 32-bit subreg in *subreg* + */ +struct tnum tnum_with_subreg(struct tnum reg, struct tnum subreg); /* Returns the tnum with the lower 32-bit subreg set to value */ struct tnum tnum_const_subreg(struct tnum a, u32 value); /* Returns true if 32-bit subreg @a is a known constant*/ diff --git a/include/linux/topology.h b/include/linux/topology.h index fea32377f7c7..52f5850730b3 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -251,7 +251,7 @@ extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int #else static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) { - return cpumask_nth(cpu, cpus); + return cpumask_nth_and(cpu, cpus, cpu_online_mask); } static inline const struct cpumask * diff --git a/include/linux/torture.h b/include/linux/torture.h index bb466eec01e4..1541454da03e 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -21,6 +21,7 @@ #include <linux/debugobjects.h> #include <linux/bug.h> #include <linux/compiler.h> +#include <linux/hrtimer.h> /* Definitions for a non-string torture-test module parameter. */ #define torture_param(type, name, init, msg) \ @@ -81,7 +82,8 @@ static inline void torture_random_init(struct torture_random_state *trsp) } /* Definitions for high-resolution-timer sleeps. */ -int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp); +int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode, + struct torture_random_state *trsp); int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp); int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp); int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp); @@ -120,10 +122,15 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); #define torture_stop_kthread(n, tp) \ _torture_stop_kthread("Stopping " #n " task", &(tp)) +/* Scheduler-related definitions. 
*/ #ifdef CONFIG_PREEMPTION #define torture_preempt_schedule() __preempt_schedule() #else #define torture_preempt_schedule() do { } while (0) #endif +#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST) +long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask); +#endif + #endif /* __LINUX_TORTURE_H */ diff --git a/include/linux/trace.h b/include/linux/trace.h index 2a70a447184c..fdcd76b7be83 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -51,7 +51,7 @@ int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...); int trace_array_init_printk(struct trace_array *tr); void trace_array_put(struct trace_array *tr); -struct trace_array *trace_array_get_by_name(const char *name); +struct trace_array *trace_array_get_by_name(const char *name, const char *systems); int trace_array_destroy(struct trace_array *tr); /* For osnoise tracer */ @@ -84,7 +84,7 @@ static inline int trace_array_init_printk(struct trace_array *tr) static inline void trace_array_put(struct trace_array *tr) { } -static inline struct trace_array *trace_array_get_by_name(const char *name) +static inline struct trace_array *trace_array_get_by_name(const char *name, const char *systems) { return NULL; } diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index eb5c3add939b..d68ff9b1247f 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -62,13 +62,13 @@ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...); /* Used to find the offset and length of dynamic fields in trace events */ struct trace_dynamic_info { #ifdef CONFIG_CPU_BIG_ENDIAN - u16 offset; u16 len; + u16 offset; #else - u16 len; u16 offset; + u16 len; #endif -}; +} __packed; /* * The trace entry - the most basic unit of tracing. This is what @@ -492,6 +492,7 @@ enum { EVENT_FILE_FL_TRIGGER_COND_BIT, EVENT_FILE_FL_PID_FILTER_BIT, EVENT_FILE_FL_WAS_ENABLED_BIT, + EVENT_FILE_FL_FREED_BIT, }; extern struct trace_event_file *trace_get_event_file(const char *instance, @@ -630,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...); * TRIGGER_COND - When set, one or more triggers has an associated filter * PID_FILTER - When set, the event is filtered based on pid * WAS_ENABLED - Set when enabled to know to clear trace on module removal + * FREED - File descriptor is freed, all fields should be considered invalid */ enum { EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), @@ -643,14 +645,14 @@ enum { EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT), EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT), + EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT), }; struct trace_event_file { struct list_head list; struct trace_event_call *event_call; struct event_filter __rcu *filter; - struct eventfs_file *ef; - struct dentry *dir; + struct eventfs_inode *ei; struct trace_array *tr; struct trace_subsystem_dir *system; struct list_head triggers; @@ -672,6 +674,7 @@ struct trace_event_file { * caching and such. 
Which is mostly OK ;-) */ unsigned long flags; + atomic_t ref; /* ref count for opened files */ atomic_t sm_ref; /* soft-mode reference counter */ atomic_t tm_ref; /* trigger-mode reference counter */ }; @@ -762,7 +765,8 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, - u64 *probe_offset, u64 *probe_addr); + u64 *probe_offset, u64 *probe_addr, + unsigned long *missed); int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else @@ -802,7 +806,7 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) static inline int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, - u64 *probe_addr) + u64 *probe_addr, unsigned long *missed) { return -EOPNOTSUPP; } @@ -878,6 +882,7 @@ extern void perf_kprobe_destroy(struct perf_event *event); extern int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type, const char **symbol, u64 *probe_offset, u64 *probe_addr, + unsigned long *missed, bool perf_type_tracepoint); #endif #ifdef CONFIG_UPROBE_EVENTS diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 6be92bf559fe..9ec229dfddaa 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -8,20 +8,25 @@ /* * Trace sequences are used to allow a function to call several other functions - * to create a string of data to use (up to a max of PAGE_SIZE). + * to create a string of data to use. */ +#define TRACE_SEQ_BUFFER_SIZE (PAGE_SIZE * 2 - \ + (sizeof(struct seq_buf) + sizeof(size_t) + sizeof(int))) + struct trace_seq { - char buffer[PAGE_SIZE]; + char buffer[TRACE_SEQ_BUFFER_SIZE]; struct seq_buf seq; + size_t readpos; int full; }; static inline void trace_seq_init(struct trace_seq *s) { - seq_buf_init(&s->seq, s->buffer, PAGE_SIZE); + seq_buf_init(&s->seq, s->buffer, TRACE_SEQ_BUFFER_SIZE); s->full = 0; + s->readpos = 0; } /** diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h index 009072792fa3..7a5fe17b6bf9 100644 --- a/include/linux/tracefs.h +++ b/include/linux/tracefs.h @@ -23,26 +23,69 @@ struct file_operations; struct eventfs_file; -struct dentry *eventfs_create_events_dir(const char *name, - struct dentry *parent); - -struct eventfs_file *eventfs_add_subsystem_dir(const char *name, - struct dentry *parent); +/** + * eventfs_callback - A callback function to create dynamic files in eventfs + * @name: The name of the file that is to be created + * @mode: return the file mode for the file (RW access, etc) + * @data: data to pass to the created file ops + * @fops: the file operations of the created file + * + * The eventfs files are dynamically created. The struct eventfs_entry array + * is passed to eventfs_create_dir() or eventfs_create_events_dir() that will + * be used to create the files within those directories. When a lookup + * or access to a file within the directory is made, the struct eventfs_entry + * array is used to find a callback() with the matching name that is being + * referenced (for lookups, the entire array is iterated and each callback + * will be called). + * + * The callback will be called with @name for the name of the file to create. + * The callback can return less than 1 to indicate that no file should be + * created.
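For illustration, a minimal sketch of the lookup-driven creation described above; the my_* names and the file_operations object are hypothetical and not part of this patch:

static const struct file_operations my_enable_fops;	/* assumed defined elsewhere */

static int my_callback(const char *name, umode_t *mode, void **data,
		       const struct file_operations **fops)
{
	if (strcmp(name, "enable") != 0)
		return 0;		/* returning less than 1: no file is created */

	*mode = 0644;			/* becomes the created inode's i_mode */
	*fops = &my_enable_fops;
	/* *data is left as the pointer handed to eventfs_create_dir() */
	return 1;
}

static const struct eventfs_entry my_entries[] = {
	{ .name = "enable", .callback = my_callback },
};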
+ * + * If a file is to be created, then @mode should be populated with the file + * mode (permissions) for which the file is created. This would be + * used to set the created inode i_mode field. + * + * The @data should be set to the data passed to the other file operations + * (read, write, etc). Note, @data will also point to the data passed in + * to eventfs_create_dir() or eventfs_create_events_dir(), but the callback + * can replace the data if it chooses to. Otherwise, the original data + * will be used for the file operation functions. + * + * The @fops should be set to the file operations that will be used to create + * the inode. + * + * NB. This callback is called while holding internal locks of the eventfs + * system. The callback must not call any code that might also call into + * the tracefs or eventfs system or it will risk creating a deadlock. + */ +typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data, + const struct file_operations **fops); -struct eventfs_file *eventfs_add_dir(const char *name, - struct eventfs_file *ef_parent); +/** + * struct eventfs_entry - dynamically created eventfs file callback handler + * @name: The name of the dynamic file in an eventfs directory + * @callback: The callback to get the fops of the file when it is created + * + * See eventfs_callback() typedef for how to set up @callback. + */ +struct eventfs_entry { + const char *name; + eventfs_callback callback; }; -int eventfs_add_file(const char *name, umode_t mode, - struct eventfs_file *ef_parent, void *data, - const struct file_operations *fops); +struct eventfs_inode; -int eventfs_add_events_file(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops); +struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent, + const struct eventfs_entry *entries, + int size, void *data); -void eventfs_remove(struct eventfs_file *ef); +struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent, + const struct eventfs_entry *entries, + int size, void *data); -void eventfs_remove_events_dir(struct dentry *dentry); +void eventfs_remove_events_dir(struct eventfs_inode *ei); +void eventfs_remove_dir(struct eventfs_inode *ei); struct dentry *tracefs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, diff --git a/include/linux/tsm.h b/include/linux/tsm.h new file mode 100644 index 000000000000..de8324a2223c --- /dev/null +++ b/include/linux/tsm.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __TSM_H +#define __TSM_H + +#include <linux/sizes.h> +#include <linux/types.h> + +#define TSM_INBLOB_MAX 64 +#define TSM_OUTBLOB_MAX SZ_32K + +/* + * Privilege level is a nested permission concept to allow confidential + * guests to partition address space; 4 levels are supported.
+ */ +#define TSM_PRIVLEVEL_MAX 3 + +/** + * struct tsm_desc - option descriptor for generating tsm report blobs + * @privlevel: optional privilege level to associate with @outblob + * @inblob_len: sizeof @inblob + * @inblob: arbitrary input data + */ +struct tsm_desc { + unsigned int privlevel; + size_t inblob_len; + u8 inblob[TSM_INBLOB_MAX]; +}; + +/** + * struct tsm_report - track state of report generation relative to options + * @desc: input parameters to @report_new() + * @outblob_len: sizeof(@outblob) + * @outblob: generated evidence to provide to the attestation agent + * @auxblob_len: sizeof(@auxblob) + * @auxblob: (optional) auxiliary data to the report (e.g. certificate data) + */ +struct tsm_report { + struct tsm_desc desc; + size_t outblob_len; + u8 *outblob; + size_t auxblob_len; + u8 *auxblob; +}; + +/** + * struct tsm_ops - attributes and operations for tsm instances + * @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider + * @privlevel_floor: convey base privlevel for nested scenarios + * @report_new: Populate @report with the report blob and auxblob + * (optional), return 0 on successful population, or -errno otherwise + * + * Implementation specific ops, only one is expected to be registered at + * a time, i.e. only one of "sev-guest", "tdx-guest", etc. + */ +struct tsm_ops { + const char *name; + const unsigned int privlevel_floor; + int (*report_new)(struct tsm_report *report, void *data); +}; + +extern const struct config_item_type tsm_report_default_type; + +/* publish @privlevel, @privlevel_floor, and @auxblob attributes */ +extern const struct config_item_type tsm_report_extra_type; + +int tsm_register(const struct tsm_ops *ops, void *priv, + const struct config_item_type *type); +int tsm_unregister(const struct tsm_ops *ops); +#endif /* __TSM_H */ diff --git a/include/linux/tty.h b/include/linux/tty.h index f002d0f25db7..8c76fd97d4ad 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -242,7 +242,7 @@ struct tty_struct { void *driver_data; spinlock_t files_lock; int write_cnt; - unsigned char *write_buf; + u8 *write_buf; struct list_head tty_files; @@ -390,14 +390,14 @@ int vcs_init(void); extern const struct class tty_class; /** - * tty_kref_get - get a tty reference - * @tty: tty device + * tty_kref_get - get a tty reference + * @tty: tty device * - * Return a new reference to a tty object. The caller must hold - * sufficient locks/counts to ensure that their existing reference cannot - * go away + * Returns: a new reference to a tty object + * + * Locking: The caller must hold sufficient locks/counts to ensure that their + * existing reference cannot go away.
*/ - static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) { if (tty) @@ -410,17 +410,18 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout); void stop_tty(struct tty_struct *tty); void start_tty(struct tty_struct *tty); void tty_write_message(struct tty_struct *tty, char *msg); -int tty_send_xchar(struct tty_struct *tty, char ch); -int tty_put_char(struct tty_struct *tty, unsigned char c); +int tty_send_xchar(struct tty_struct *tty, u8 ch); +int tty_put_char(struct tty_struct *tty, u8 c); unsigned int tty_chars_in_buffer(struct tty_struct *tty); unsigned int tty_write_room(struct tty_struct *tty); void tty_driver_flush_buffer(struct tty_struct *tty); void tty_unthrottle(struct tty_struct *tty); -int tty_throttle_safe(struct tty_struct *tty); -int tty_unthrottle_safe(struct tty_struct *tty); +bool tty_throttle_safe(struct tty_struct *tty); +bool tty_unthrottle_safe(struct tty_struct *tty); int tty_do_resize(struct tty_struct *tty, struct winsize *ws); int tty_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); +int tty_get_tiocm(struct tty_struct *tty); int is_current_pgrp_orphaned(void); void tty_hangup(struct tty_struct *tty); void tty_vhangup(struct tty_struct *tty); @@ -435,16 +436,14 @@ void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud); /** - * tty_get_baud_rate - get tty bit rates - * @tty: tty to query + * tty_get_baud_rate - get tty bit rates + * @tty: tty to query * - * Returns the baud rate as an integer for this terminal. The - * termios lock must be held by the caller and the terminal bit - * flags may be updated. + * Returns: the baud rate as an integer for this terminal * - * Locking: none + * Locking: The termios lock must be held by the caller. */ -static inline speed_t tty_get_baud_rate(struct tty_struct *tty) +static inline speed_t tty_get_baud_rate(const struct tty_struct *tty) { return tty_termios_baud_rate(&tty->termios); } diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 18beff0cec1a..7372124fbf90 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -72,8 +72,7 @@ struct serial_struct; * is closed for the last time freeing up the resources. This is * actually the second part of shutdown for routines that might sleep. * - * @write: ``ssize_t ()(struct tty_struct *tty, const unsigned char *buf, - * size_t count)`` + * @write: ``ssize_t ()(struct tty_struct *tty, const u8 *buf, size_t count)`` * * This routine is called by the kernel to write a series (@count) of * characters (@buf) to the @tty device. The characters may come from @@ -85,7 +84,7 @@ struct serial_struct; * * Optional: Required for writable devices. May not sleep. * - * @put_char: ``int ()(struct tty_struct *tty, unsigned char ch)`` + * @put_char: ``int ()(struct tty_struct *tty, u8 ch)`` * * This routine is called by the kernel to write a single character @ch to * the @tty device. If the kernel uses this routine, it must call the @@ -243,7 +242,7 @@ struct serial_struct; * Optional: If not provided, the device is assumed to have no FIFO. * Usually correct to invoke via tty_wait_until_sent(). May sleep. * - * @send_xchar: ``void ()(struct tty_struct *tty, char ch)`` + * @send_xchar: ``void ()(struct tty_struct *tty, u8 ch)`` * * This routine is used to send a high-priority XON/XOFF character (@ch) * to the @tty device. 
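The hunk below updates struct tty_operations to match. As a hedged sketch of what driver callbacks look like after the conversion to u8 (the foo_* names are hypothetical, not from the patch):

static ssize_t foo_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
	/* push @count bytes into the device FIFO; report how many were taken */
	return count;
}

static int foo_put_char(struct tty_struct *tty, u8 ch)
{
	return foo_write(tty, &ch, 1) == 1;	/* 1 if the byte was queued */
}

static void foo_send_xchar(struct tty_struct *tty, u8 ch)
{
	/* transmit @ch ahead of any queued data (XON/XOFF flow control) */
}

static const struct tty_operations foo_ops = {
	.write		= foo_write,
	.put_char	= foo_put_char,
	.send_xchar	= foo_send_xchar,
};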
@@ -375,7 +374,7 @@ struct tty_operations { void (*flush_buffer)(struct tty_struct *tty); void (*set_ldisc)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, int timeout); - void (*send_xchar)(struct tty_struct *tty, char ch); + void (*send_xchar)(struct tty_struct *tty, u8 ch); int (*tiocmget)(struct tty_struct *tty); int (*tiocmset)(struct tty_struct *tty, unsigned int set, unsigned int clear); diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h index 6b367eb17979..1b861f2100b6 100644 --- a/include/linux/tty_port.h +++ b/include/linux/tty_port.h @@ -114,8 +114,8 @@ struct tty_port { unsigned char console:1; struct mutex mutex; struct mutex buf_mutex; - unsigned char *xmit_buf; - DECLARE_KFIFO_PTR(xmit_fifo, unsigned char); + u8 *xmit_buf; + DECLARE_KFIFO_PTR(xmit_fifo, u8); unsigned int close_delay; unsigned int closing_wait; int drain_delay; @@ -149,10 +149,10 @@ struct device *tty_port_register_device_attr(struct tty_port *port, const struct attribute_group **attr_grp); struct device *tty_port_register_device_serdev(struct tty_port *port, struct tty_driver *driver, unsigned index, - struct device *device); + struct device *host, struct device *parent); struct device *tty_port_register_device_attr_serdev(struct tty_port *port, struct tty_driver *driver, unsigned index, - struct device *device, void *drvdata, + struct device *host, struct device *parent, void *drvdata, const struct attribute_group **attr_grp); void tty_port_unregister_device(struct tty_port *port, struct tty_driver *driver, unsigned index); diff --git a/include/linux/types.h b/include/linux/types.h index 253168bb3fe1..2bc8766ba20c 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -120,6 +120,9 @@ typedef s64 int64_t; #define aligned_be64 __aligned_be64 #define aligned_le64 __aligned_le64 +/* Nanosecond scalar representation for kernel time values */ +typedef s64 ktime_t; + /** * The type used for indexing onto a disc or disc partition. * diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h index cf3ada3e820e..c499ae809c7d 100644 --- a/include/linux/ucs2_string.h +++ b/include/linux/ucs2_string.h @@ -10,6 +10,7 @@ typedef u16 ucs2_char_t; unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength); unsigned long ucs2_strlen(const ucs2_char_t *s); unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); +ssize_t ucs2_strscpy(ucs2_char_t *dst, const ucs2_char_t *src, size_t count); int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); unsigned long ucs2_utf8size(const ucs2_char_t *src); diff --git a/include/linux/udp.h b/include/linux/udp.h index 43c1fb2d2c21..d04188714dca 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) return (num + net_hash_mix(net)) & mask; } +enum { + UDP_FLAGS_CORK, /* Cork is required */ + UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */ + UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? 
*/ + UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */ + UDP_FLAGS_ACCEPT_FRAGLIST, + UDP_FLAGS_ACCEPT_L4, + UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */ + UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */ + UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */ +}; + struct udp_sock { /* inet_sock has to be the first member */ struct inet_sock inet; #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0] #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1] #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node + + unsigned long udp_flags; + int pending; /* Any pending frames ? */ - unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ - unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ - encap_enabled:1, /* This socket enabled encap - * processing; UDP tunnels and - * different encapsulation layer set - * this - */ - gro_enabled:1, /* Request GRO aggregation */ - accept_udp_l4:1, - accept_udp_fraglist:1; + /* * Following member retains the information to create a UDP header * when the socket is uncorked. @@ -62,12 +67,6 @@ struct udp_sock { */ __u16 pcslen; __u16 pcrlen; -/* indicator bits used by pcflag: */ -#define UDPLITE_BIT 0x1 /* set by udplite proto init function */ -#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */ -#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */ - __u8 pcflag; /* marks socket as UDP-Lite if > 0 */ - __u8 unused[3]; /* * For encapsulation sockets. */ @@ -95,28 +94,39 @@ struct udp_sock { int forward_threshold; }; +#define udp_test_bit(nr, sk) \ + test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags) +#define udp_set_bit(nr, sk) \ + set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags) +#define udp_test_and_set_bit(nr, sk) \ + test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags) +#define udp_clear_bit(nr, sk) \ + clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags) +#define udp_assign_bit(nr, sk, val) \ + assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val) + #define UDP_MAX_SEGMENTS (1 << 6UL) #define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk) static inline void udp_set_no_check6_tx(struct sock *sk, bool val) { - udp_sk(sk)->no_check6_tx = val; + udp_assign_bit(NO_CHECK6_TX, sk, val); } static inline void udp_set_no_check6_rx(struct sock *sk, bool val) { - udp_sk(sk)->no_check6_rx = val; + udp_assign_bit(NO_CHECK6_RX, sk, val); } -static inline bool udp_get_no_check6_tx(struct sock *sk) +static inline bool udp_get_no_check6_tx(const struct sock *sk) { - return udp_sk(sk)->no_check6_tx; + return udp_test_bit(NO_CHECK6_TX, sk); } -static inline bool udp_get_no_check6_rx(struct sock *sk) +static inline bool udp_get_no_check6_rx(const struct sock *sk) { - return udp_sk(sk)->no_check6_rx; + return udp_test_bit(NO_CHECK6_RX, sk); } static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk, @@ -135,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) if (!skb_is_gso(skb)) return false; - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4) + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && + !udp_test_bit(ACCEPT_L4, sk)) return true; - if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist) + if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && + !udp_test_bit(ACCEPT_FRAGLIST, sk)) return true; return false; @@ -146,8 +158,8 @@ static inline bool 
udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) static inline void udp_allow_gso(struct sock *sk) { - udp_sk(sk)->accept_udp_l4 = 1; - udp_sk(sk)->accept_udp_fraglist = 1; + udp_set_bit(ACCEPT_L4, sk); + udp_set_bit(ACCEPT_FRAGLIST, sk); } #define udp_portaddr_for_each_entry(__sk, list) \ diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h index b0542cd11aeb..f85ec5613721 100644 --- a/include/linux/uidgid.h +++ b/include/linux/uidgid.h @@ -12,20 +12,12 @@ * to detect when we overlook these differences. * */ -#include <linux/types.h> +#include <linux/uidgid_types.h> #include <linux/highuid.h> struct user_namespace; extern struct user_namespace init_user_ns; - -typedef struct { - uid_t val; -} kuid_t; - - -typedef struct { - gid_t val; -} kgid_t; +struct uid_gid_map; #define KUIDT_INIT(value) (kuid_t){ value } #define KGIDT_INIT(value) (kgid_t){ value } @@ -138,6 +130,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) return from_kgid(ns, gid) != (gid_t) -1; } +u32 map_id_down(struct uid_gid_map *map, u32 id); +u32 map_id_up(struct uid_gid_map *map, u32 id); + #else static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid) @@ -186,6 +181,15 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) return gid_valid(gid); } +static inline u32 map_id_down(struct uid_gid_map *map, u32 id) +{ + return id; +} + +static inline u32 map_id_up(struct uid_gid_map *map, u32 id) +{ + return id; +} #endif /* CONFIG_USER_NS */ #endif /* _LINUX_UIDGID_H */ diff --git a/include/linux/uidgid_types.h b/include/linux/uidgid_types.h new file mode 100644 index 000000000000..b35ac4955a33 --- /dev/null +++ b/include/linux/uidgid_types.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_UIDGID_TYPES_H +#define _LINUX_UIDGID_TYPES_H + +#include <linux/types.h> + +typedef struct { + uid_t val; +} kuid_t; + +typedef struct { + gid_t val; +} kgid_t; + +#endif /* _LINUX_UIDGID_TYPES_H */ diff --git a/include/linux/uio.h b/include/linux/uio.h index 42bce38a8e87..bea9c89922d9 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -21,12 +21,12 @@ struct kvec { enum iter_type { /* iter types */ + ITER_UBUF, ITER_IOVEC, - ITER_KVEC, ITER_BVEC, + ITER_KVEC, ITER_XARRAY, ITER_DISCARD, - ITER_UBUF, }; #define ITER_SOURCE 1 // == WRITE @@ -43,11 +43,7 @@ struct iov_iter { bool copy_mc; bool nofault; bool data_source; - bool user_backed; - union { - size_t iov_offset; - int last_offset; - }; + size_t iov_offset; /* * Hack alert: overlay ubuf_iovec with iovec + count, so * that the members resolve correctly regardless of the type @@ -143,7 +139,7 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i) static inline bool user_backed_iter(const struct iov_iter *i) { - return i->user_backed; + return iter_is_ubuf(i) || iter_is_iovec(i); } /* @@ -342,27 +338,6 @@ iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes) return npages; } -struct csum_state { - __wsum csum; - size_t off; -}; - -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i); -size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); - -static __always_inline __must_check -bool csum_and_copy_from_iter_full(void *addr, size_t bytes, - __wsum *csum, struct iov_iter *i) -{ - size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i); - if (likely(copied == bytes)) - return true; - iov_iter_revert(i, copied); - return false; -} -size_t 
hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, - struct iov_iter *i); - struct iovec *iovec_from_user(const struct iovec __user *uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_iov, bool compat); @@ -372,8 +347,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec, ssize_t __import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i, bool compat); -int import_single_range(int type, void __user *buf, size_t len, - struct iovec *iov, struct iov_iter *i); int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i); static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction, @@ -383,7 +356,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction, *i = (struct iov_iter) { .iter_type = ITER_UBUF, .copy_mc = false, - .user_backed = true, .data_source = direction, .ubuf = buf, .count = count, diff --git a/include/linux/units.h b/include/linux/units.h index 2793a41e73a2..45110daaf8d3 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -2,6 +2,7 @@ #ifndef _LINUX_UNITS_H #define _LINUX_UNITS_H +#include <linux/bits.h> #include <linux/math.h> /* Metric prefixes in accordance with Système international (d'unités) */ @@ -31,6 +32,10 @@ #define MICROWATT_PER_MILLIWATT 1000UL #define MICROWATT_PER_WATT 1000000UL +#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE) +#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE) +#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE) + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) diff --git a/include/linux/usb.h b/include/linux/usb.h index a21074861f91..9e52179872a5 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -632,7 +632,6 @@ struct usb3_lpm_parameters { * @reset_resume: needs reset instead of resume * @port_is_suspended: the upstream port is suspended (L2 or U3) * @slot_id: Slot ID assigned by xHCI - * @removable: Device can be physically removed from this port * @l1_params: best effor service latency for USB2 L1 LPM state, and L1 timeout. * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout. * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout. @@ -1145,16 +1144,6 @@ extern ssize_t usb_store_new_id(struct usb_dynids *dynids, extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf); /** - * struct usbdrv_wrap - wrapper for driver-model structure - * @driver: The driver-model core driver structure. - * @for_devices: Non-zero for device drivers, 0 for interface drivers. - */ -struct usbdrv_wrap { - struct device_driver driver; - int for_devices; -}; - -/** * struct usb_driver - identifies USB interface driver to usbcore * @name: The driver name should be unique among USB drivers, * and should normally be the same as the module name. @@ -1194,7 +1183,7 @@ struct usbdrv_wrap { * is bound to the driver. * @dynids: used internally to hold the list of dynamically added device * ids for this driver. - * @drvwrap: Driver-model core structure wrapper. + * @driver: The driver-model core driver structure. * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be * added to this driver by preventing the sysfs file from being created. 
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend @@ -1242,13 +1231,13 @@ struct usb_driver { const struct attribute_group **dev_groups; struct usb_dynids dynids; - struct usbdrv_wrap drvwrap; + struct device_driver driver; unsigned int no_dynamic_id:1; unsigned int supports_autosuspend:1; unsigned int disable_hub_initiated_lpm:1; unsigned int soft_unbind:1; }; -#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver) +#define to_usb_driver(d) container_of(d, struct usb_driver, driver) /** * struct usb_device_driver - identifies USB device driver to usbcore @@ -1264,9 +1253,12 @@ struct usb_driver { * module is being unloaded. * @suspend: Called when the device is going to be suspended by the system. * @resume: Called when the device is being resumed by the system. + * @choose_configuration: If non-NULL, called instead of the default + * usb_choose_configuration(). If this returns an error then we'll go + * on to call the normal usb_choose_configuration(). * @dev_groups: Attributes attached to the device that will be created once it * is bound to the driver. - * @drvwrap: Driver-model core structure wrapper. + * @driver: The driver-model core driver structure. * @id_table: used with @match() to select better matching driver at * probe() time. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend @@ -1275,7 +1267,7 @@ struct usb_driver { * resume and suspend functions will be called in addition to the driver's * own, so this part of the setup does not need to be replicated. * - * USB drivers must provide all the fields listed above except drvwrap, + * USB drivers must provide all the fields listed above except driver, * match, and id_table. */ struct usb_device_driver { @@ -1287,14 +1279,17 @@ struct usb_device_driver { int (*suspend) (struct usb_device *udev, pm_message_t message); int (*resume) (struct usb_device *udev, pm_message_t message); + + int (*choose_configuration) (struct usb_device *udev); + const struct attribute_group **dev_groups; - struct usbdrv_wrap drvwrap; + struct device_driver driver; const struct usb_device_id *id_table; unsigned int supports_autosuspend:1; unsigned int generic_subclass:1; }; #define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \ - drvwrap.driver) + driver) /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number @@ -1823,22 +1818,6 @@ void *usb_alloc_coherent(struct usb_device *dev, size_t size, void usb_free_coherent(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma); -#if 0 -struct urb *usb_buffer_map(struct urb *urb); -void usb_buffer_dmasync(struct urb *urb); -void usb_buffer_unmap(struct urb *urb); -#endif - -struct scatterlist; -int usb_buffer_map_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int nents); -#if 0 -void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int n_hw_ents); -#endif -void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int n_hw_ents); - /*-------------------------------------------------------------------* * SYNCHRONOUS CALL SUPPORT * *-------------------------------------------------------------------*/ diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 0b4f2d5faa08..5a7f96684ea2 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -64,6 +64,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_PMQOS BIT(15) #define 
CI_HDRC_PHY_VBUS_CONTROL BIT(16) #define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17) +#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 6014340ba980..af3cd2aae4bc 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -35,6 +35,14 @@ * are ready. The control transfer will then be kept from completing till * all the function drivers that requested for USB_GADGET_DELAYED_STAUS * invoke usb_composite_setup_continue(). + * + * NOTE: USB_GADGET_DELAYED_STATUS must not be used in UDC drivers: they + * must delay completing the status stage for 0-length control transfers + * regardless of whether USB_GADGET_DELAYED_STATUS is returned from + * the gadget driver's setup() callback. + * Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS, + * which is a bug. These drivers must be fixed and USB_GADGET_DELAYED_STATUS + * must be contained within the composite framework. */ #define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 75bda0783395..a771ccc038ac 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -236,6 +236,7 @@ struct usb_ep { unsigned max_streams:16; unsigned mult:2; unsigned maxburst:5; + unsigned fifo_mode:1; u8 address; const struct usb_endpoint_descriptor *desc; const struct usb_ss_ep_comp_descriptor *comp_desc; @@ -711,6 +712,15 @@ static inline int usb_gadget_check_config(struct usb_gadget *gadget) * get_interface. Setting a configuration (or interface) is where * endpoints should be activated or (config 0) shut down. * + * The gadget driver's setup() callback does not have to queue a response to + * ep0 within the setup() call; the driver can do it after setup() returns. + * The UDC driver must wait until such a response is queued before proceeding + * with the data/status stages of the control transfer. + * + * NOTE: Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS + * being returned from the setup() callback, which is a bug. See the comment + * next to USB_GADGET_DELAYED_STATUS for details. + * * (Note that only the default control endpoint is supported. Neither * hosts nor devices generally support control traffic except to ep0.) * diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 61d4f0b793dc..cd77fc6095a1 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -372,8 +372,9 @@ struct hc_driver { * or bandwidth constraints. */ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); - /* Returns the hardware-chosen device address */ - int (*address_device)(struct usb_hcd *, struct usb_device *udev); + /* Set the hardware-chosen device address */ + int (*address_device)(struct usb_hcd *, struct usb_device *udev, + unsigned int timeout_ms); /* prepares the hardware to send commands to the device */ int (*enable_device)(struct usb_hcd *, struct usb_device *udev); /* Notifies the HCD after a hub descriptor is fetched.
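A hedged sketch of how an HCD might honour the new timeout_ms argument to address_device; the foo_* container, helper, and completion are hypothetical, while wait_for_completion_timeout() and msecs_to_jiffies() are the usual kernel primitives:

static int foo_address_device(struct usb_hcd *hcd, struct usb_device *udev,
			      unsigned int timeout_ms)
{
	struct foo_hcd *foo = hcd_to_foo(hcd);	/* hypothetical container_of helper */

	reinit_completion(&foo->addr_done);
	foo_queue_set_address(foo, udev);	/* hypothetical: issue SET_ADDRESS */

	if (!wait_for_completion_timeout(&foo->addr_done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;
	return 0;
}

The shorter timeouts are requested by hub code for devices carrying the USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT quirk added further down in this merge.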
@@ -484,8 +485,25 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev, extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); +#ifdef CONFIG_USB_PCI_AMD extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); +static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev, + const struct hc_driver *driver) +{ + if (!usb_hcd_amd_remote_wakeup_quirk(dev)) + return false; + if (driver->flags & (HCD_USB11 | HCD_USB3)) + return true; + return false; +} +#else /* CONFIG_USB_PCI_AMD */ +static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev, + const struct hc_driver *driver) +{ + return false; +} +#endif extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif /* CONFIG_USB_PCI */ diff --git a/include/linux/usb/ljca.h b/include/linux/usb/ljca.h new file mode 100644 index 000000000000..47661feda96c --- /dev/null +++ b/include/linux/usb/ljca.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Intel Corporation. All rights reserved. + */ +#ifndef _LINUX_USB_LJCA_H_ +#define _LINUX_USB_LJCA_H_ + +#include <linux/auxiliary_bus.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#define LJCA_MAX_GPIO_NUM 64 + +#define auxiliary_dev_to_ljca_client(auxiliary_dev) \ + container_of(auxiliary_dev, struct ljca_client, auxdev) + +struct ljca_adapter; + +/** + * typedef ljca_event_cb_t - event callback function signature + * + * @context: the execution context of who registered this callback + * @cmd: the command from device for this event + * @evt_data: the event data payload + * @len: the event data payload length + * + * The callback function is called in interrupt context and the data payload is + * only valid during the call. If the user needs to access the data later, it + * must copy it.
+ */ +typedef void (*ljca_event_cb_t)(void *context, u8 cmd, const void *evt_data, int len); + +/** + * struct ljca_client - represents an ljca client device + * + * @type: ljca client type + * @id: ljca client id within same client type + * @link: ljca client on the same ljca adapter + * @auxdev: auxiliary device object + * @adapter: ljca adapter the ljca client sits on + * @context: the execution context of the event callback + * @event_cb: ljca client driver registers this callback to get + * firmware asynchronous rx buffer pending notifications + * @event_cb_lock: spinlock to protect event callback + */ +struct ljca_client { + u8 type; + u8 id; + struct list_head link; + struct auxiliary_device auxdev; + struct ljca_adapter *adapter; + + void *context; + ljca_event_cb_t event_cb; + /* lock to protect event_cb */ + spinlock_t event_cb_lock; +}; + +/** + * struct ljca_gpio_info - ljca gpio client device info + * + * @num: ljca gpio client device pin number + * @valid_pin_map: ljca gpio client device valid pin mapping + */ +struct ljca_gpio_info { + unsigned int num; + DECLARE_BITMAP(valid_pin_map, LJCA_MAX_GPIO_NUM); +}; + +/** + * struct ljca_i2c_info - ljca i2c client device info + * + * @id: ljca i2c client device identification number + * @capacity: ljca i2c client device capacity + * @intr_pin: ljca i2c client device interrupt pin number if exists + */ +struct ljca_i2c_info { + u8 id; + u8 capacity; + u8 intr_pin; +}; + +/** + * struct ljca_spi_info - ljca spi client device info + * + * @id: ljca spi client device identification number + * @capacity: ljca spi client device capacity + */ +struct ljca_spi_info { + u8 id; + u8 capacity; +}; + +/** + * ljca_register_event_cb - register a callback function to receive events + * + * @client: ljca client device + * @event_cb: callback function + * @context: execution context of event callback + * + * Return: 0 in case of success, negative value in case of error + */ +int ljca_register_event_cb(struct ljca_client *client, ljca_event_cb_t event_cb, void *context); + +/** + * ljca_unregister_event_cb - unregister the callback function for an event + * + * @client: ljca client device + */ +void ljca_unregister_event_cb(struct ljca_client *client); + +/** + * ljca_transfer - issue an LJCA command and wait for a response + * + * @client: ljca client device + * @cmd: the command to be sent to the device + * @obuf: the buffer to be sent to the device; it can be NULL if the user + * doesn't need to transmit data with this command + * @obuf_len: the size of the buffer to be sent to the device; it should + * be 0 when obuf is NULL + * @ibuf: any data associated with the response will be copied here; it can be + * NULL if the user doesn't need the response data + * @ibuf_len: must be initialized to the input buffer size + * + * Return: the actual length of response data for success, negative value for errors + */ +int ljca_transfer(struct ljca_client *client, u8 cmd, const u8 *obuf, + u8 obuf_len, u8 *ibuf, u8 ibuf_len); + +/** + * ljca_transfer_noack - issue an LJCA command without a response + * + * @client: ljca client device + * @cmd: the command to be sent to the device + * @obuf: the buffer to be sent to the device; it can be NULL if the user + * doesn't need to transmit data with this command + * @obuf_len: the size of the buffer to be sent to the device + * + * Return: 0 for success, negative value for errors + */ +int ljca_transfer_noack(struct ljca_client *client, u8 cmd, const u8 *obuf, + u8 obuf_len); + +#endif diff --git
a/include/linux/usb/pd.h b/include/linux/usb/pd.h index c59fb79a42e8..eb626af0e4e7 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -228,6 +228,7 @@ enum pd_pdo_type { #define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */ #define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */ #define PDO_FIXED_FRS_CURR_SHIFT 23 +#define PDO_FIXED_PEAK_CURR_SHIFT 20 #define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */ #define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */ diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h index b057250704e8..3a747938cdab 100644 --- a/include/linux/usb/pd_vdo.h +++ b/include/linux/usb/pd_vdo.h @@ -376,6 +376,7 @@ | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \ | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7)) +#define VDO_TYPEC_CABLE_SPEED(vdo) ((vdo) & 0x7) #define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3) /* diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index b513749582d7..e4de6bc1f69b 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -144,10 +144,6 @@ struct usb_phy { */ int (*set_wakeup)(struct usb_phy *x, bool enabled); - /* notify phy port status change */ - int (*notify_port_status)(struct usb_phy *x, int port, - u16 portstatus, u16 portchange); - /* notify phy connect status change */ int (*notify_connect)(struct usb_phy *x, enum usb_device_speed speed); @@ -321,15 +317,6 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled) } static inline int -usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange) -{ - if (x && x->notify_port_status) - return x->notify_port_status(x, port, portstatus, portchange); - else - return 0; -} - -static inline int usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed) { if (x && x->notify_connect) diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index eeb7c2157c72..59409c1fc3de 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -72,4 +72,7 @@ /* device has endpoints that should be ignored */ #define USB_QUIRK_ENDPOINT_IGNORE BIT(15) +/* short SET_ADDRESS request timeout */ +#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT BIT(16) + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h index 287e9d83fb8b..33a4c146dc19 100644 --- a/include/linux/usb/r8152.h +++ b/include/linux/usb/r8152.h @@ -30,6 +30,7 @@ #define VENDOR_ID_NVIDIA 0x0955 #define VENDOR_ID_TPLINK 0x2357 #define VENDOR_ID_DLINK 0x2001 +#define VENDOR_ID_ASUS 0x0b05 #if IS_REACHABLE(CONFIG_USB_RTL8152) extern u8 rtl8152_get_version(struct usb_interface *intf); diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index d418c55523a7..372898d9eeb0 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -5,16 +5,6 @@ * Copyright (C) 2011 Renesas Solutions Corp. * Copyright (C) 2019 Renesas Electronics Corporation * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * */ #ifndef RENESAS_USB_H #define RENESAS_USB_H diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h index 83376473ac76..467e8045e9f8 100644 --- a/include/linux/usb/tcpci.h +++ b/include/linux/usb/tcpci.h @@ -36,7 +36,9 @@ #define TCPC_ALERT_MASK 0x12 #define TCPC_POWER_STATUS_MASK 0x14 -#define TCPC_FAULT_STATUS_MASK 0x15 + +#define TCPC_FAULT_STATUS_MASK 0x15 +#define TCPC_FAULT_STATUS_MASK_VCONN_OC BIT(1) #define TCPC_EXTENDED_STATUS_MASK 0x16 #define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0) @@ -104,6 +106,7 @@ #define TCPC_FAULT_STATUS 0x1f #define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7) +#define TCPC_FAULT_STATUS_VCONN_OC BIT(1) #define TCPC_ALERT_EXTENDED 0x21 diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index ab7ca872950b..65fac5e1f317 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -173,5 +173,6 @@ void tcpm_pd_hard_reset(struct tcpm_port *port); void tcpm_tcpc_reset(struct tcpm_port *port); void tcpm_port_clean(struct tcpm_port *port); bool tcpm_port_is_toggling(struct tcpm_port *port); +void tcpm_port_error_recovery(struct tcpm_port *port); #endif /* __LINUX_USB_TCPM_H */ diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 8fa781207970..a05d6f6f2536 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -202,6 +202,8 @@ struct typec_cable_desc { * @accessory: Audio, Debug or none. * @identity: Discover Identity command data * @pd_revision: USB Power Delivery Specification Revision if supported + * @attach: Notification about attached USB device + * @deattach: Notification about removed USB device * * Details about a partner that is attached to USB Type-C port. If @identity * member exists when partner is registered, a directory named "identity" is @@ -217,6 +219,9 @@ struct typec_partner_desc { enum typec_accessory accessory; struct usb_pd_identity *identity; u16 pd_revision; /* 0300H = "3.0" */ + + void (*attach)(struct typec_partner *partner, struct device *dev); + void (*deattach)(struct typec_partner *partner, struct device *dev); }; /** @@ -335,4 +340,36 @@ int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_ int typec_partner_set_usb_power_delivery(struct typec_partner *partner, struct usb_power_delivery *pd); +/** + * struct typec_connector - Representation of Type-C port for external drivers + * @attach: notification about an attached device + * @deattach: notification about device removal + * + * Drivers that control the USB and other ports (DisplayPorts, etc.), that are + * connected to the Type-C connectors, can use these callbacks to inform the + * Type-C connector class about connections and disconnections. That information + * can then be used by the typec-port drivers to power on or off parts that are + * needed or not needed - as an example, in USB mode if a USB2 device is + * enumerated, USB3 components (retimers, phys, and what have you) do not need + * to be powered on. + * + * The attached (enumerated) devices will be linked with the typec-partner device.
+ */ +struct typec_connector { + void (*attach)(struct typec_connector *con, struct device *dev); + void (*deattach)(struct typec_connector *con, struct device *dev); +}; + +static inline void typec_attach(struct typec_connector *con, struct device *dev) +{ + if (con && con->attach) + con->attach(con, dev); +} + +static inline void typec_deattach(struct typec_connector *con, struct device *dev) +{ + if (con && con->deattach) + con->deattach(con, dev); +} + #endif /* __LINUX_USB_TYPEC_H */ diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h index 8d09c2f0a9b8..1f358098522d 100644 --- a/include/linux/usb/typec_dp.h +++ b/include/linux/usb/typec_dp.h @@ -67,8 +67,10 @@ enum { #define DP_CAP_UFP_D 1 #define DP_CAP_DFP_D 2 #define DP_CAP_DFP_D_AND_UFP_D 3 -#define DP_CAP_DP_SIGNALING BIT(2) /* Always set */ -#define DP_CAP_GEN2 BIT(3) /* Reserved after v1.0b */ +#define DP_CAP_DP_SIGNALLING(_cap_) (((_cap_) & GENMASK(5, 2)) >> 2) +#define DP_CAP_SIGNALLING_HBR3 1 +#define DP_CAP_SIGNALLING_UHBR10 2 +#define DP_CAP_SIGNALLING_UHBR20 3 #define DP_CAP_RECEPTACLE BIT(6) #define DP_CAP_USB BIT(7) #define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8) @@ -78,6 +80,13 @@ enum { DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_)) #define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_)) +#define DP_CAP_UHBR_13_5_SUPPORT BIT(26) +#define DP_CAP_CABLE_TYPE(_cap_) (((_cap_) & GENMASK(29, 28)) >> 28) +#define DP_CAP_CABLE_TYPE_PASSIVE 0 +#define DP_CAP_CABLE_TYPE_RE_TIMER 1 +#define DP_CAP_CABLE_TYPE_RE_DRIVER 2 +#define DP_CAP_CABLE_TYPE_OPTICAL 3 +#define DP_CAP_DPAM_VERSION BIT(30) /* DisplayPort Status Update VDO bits */ #define DP_STATUS_CONNECTION(_status_) ((_status_) & 3) @@ -97,13 +106,24 @@ enum { #define DP_CONF_CURRENTLY(_conf_) ((_conf_) & 3) #define DP_CONF_UFP_U_AS_DFP_D BIT(0) #define DP_CONF_UFP_U_AS_UFP_D BIT(1) -#define DP_CONF_SIGNALING_DP BIT(2) -#define DP_CONF_SIGNALING_GEN_2 BIT(3) /* Reserved after v1.0b */ +#define DP_CONF_SIGNALLING_MASK GENMASK(5, 2) +#define DP_CONF_SIGNALLING_SHIFT 2 +#define DP_CONF_SIGNALLING_HBR3 1 +#define DP_CONF_SIGNALLING_UHBR10 2 +#define DP_CONF_SIGNALLING_UHBR20 3 #define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8 #define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8) /* Helper for setting/getting the pin assignment value to the configuration */ #define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8) #define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8) +#define DP_CONF_UHBR13_5_SUPPORT BIT(26) +#define DP_CONF_CABLE_TYPE_MASK GENMASK(29, 28) +#define DP_CONF_CABLE_TYPE_SHIFT 28 +#define DP_CONF_CABLE_TYPE_PASSIVE 0 +#define DP_CONF_CABLE_TYPE_RE_TIMER 1 +#define DP_CONF_CABLE_TYPE_RE_DRIVER 2 +#define DP_CONF_CABLE_TYPE_OPTICAL 3 +#define DP_CONF_DPAM_VERSION BIT(30) #endif /* __USB_TYPEC_DP_H */ diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h index 63dd44b72e0c..c7a2153bd6f5 100644 --- a/include/linux/usb/typec_tbt.h +++ b/include/linux/usb/typec_tbt.h @@ -46,6 +46,7 @@ struct typec_thunderbolt_data { #define TBT_CABLE_OPTICAL BIT(21) #define TBT_CABLE_RETIMER BIT(22) #define TBT_CABLE_LINK_TRAINING BIT(23) +#define TBT_CABLE_ACTIVE_PASSIVE BIT(25) #define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16) #define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19) diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 45f09bec02c4..6030a8235617 100644 
--- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -65,6 +65,10 @@ enum rlimit_type { UCOUNT_RLIMIT_COUNTS, }; +#if IS_ENABLED(CONFIG_BINFMT_MISC) +struct binfmt_misc; +#endif + struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; @@ -102,6 +106,10 @@ struct user_namespace { struct ucounts *ucounts; long ucount_max[UCOUNT_COUNTS]; long rlimit_max[UCOUNT_RLIMIT_COUNTS]; + +#if IS_ENABLED(CONFIG_BINFMT_MISC) + struct binfmt_misc *binfmt_misc; +#endif } __randomize_layout; struct ucounts { diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index ac8c6854097c..e4056547fbe6 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -93,6 +93,17 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm, extern long uffd_wp_range(struct vm_area_struct *vma, unsigned long start, unsigned long len, bool enable_wp); +/* move_pages */ +void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2); +void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2); +ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm, + unsigned long dst_start, unsigned long src_start, + unsigned long len, __u64 flags); +int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval, + struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, + unsigned long dst_addr, unsigned long src_addr); + /* mm helpers */ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, struct vm_userfaultfd_ctx vm_ctx) @@ -161,11 +172,22 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) } static inline bool vma_can_userfault(struct vm_area_struct *vma, - unsigned long vm_flags) + unsigned long vm_flags, + bool wp_async) { + vm_flags &= __VM_UFFD_FLAGS; + if ((vm_flags & VM_UFFD_MINOR) && (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))) return false; + + /* + * If wp async enabled, and WP is the only mode enabled, allow any + * memory type. 
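The move_pages() and move_pages_huge_pmd() declarations above are the kernel side of the new UFFDIO_MOVE ioctl. A hedged userspace sketch, assuming the matching v6.8 uapi in <linux/userfaultfd.h> and ranges that meet the ioctl's registration requirements:

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Move len bytes from src to dst through the userfaultfd fd.
 * Returns 0 on success; on partial progress mv.move reports how
 * many bytes were actually moved. */
static int move_range(int uffd, void *dst, void *src, size_t len)
{
        struct uffdio_move mv = {
                .dst  = (unsigned long)dst,
                .src  = (unsigned long)src,
                .len  = len,
                .mode = 0,      /* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES */
        };

        return ioctl(uffd, UFFDIO_MOVE, &mv) ? -1 : 0;
}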
+ */ + if (wp_async && (vm_flags == VM_UFFD_WP)) + return true; + #ifndef CONFIG_PTE_MARKER_UFFD_WP /* * If user requested uffd-wp but not enabled pte markers for @@ -175,6 +197,8 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma, if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma)) return false; #endif + + /* By default, allow any of anon|shmem|hugetlb */ return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || vma_is_shmem(vma); } @@ -197,6 +221,7 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, extern void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf); extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma); +extern bool userfaultfd_wp_async(struct vm_area_struct *vma); #else /* CONFIG_USERFAULTFD */ @@ -207,6 +232,13 @@ static inline vm_fault_t handle_userfault(struct vm_fault *vmf, return VM_FAULT_SIGBUS; } +static inline long uffd_wp_range(struct vm_area_struct *vma, + unsigned long start, unsigned long len, + bool enable_wp) +{ + return false; +} + static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, struct vm_userfaultfd_ctx vm_ctx) { @@ -297,6 +329,11 @@ static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) return false; } +static inline bool userfaultfd_wp_async(struct vm_area_struct *vma) +{ + return false; +} + #endif /* CONFIG_USERFAULTFD */ static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 0e652026b776..db15ac07f8a6 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -204,6 +204,16 @@ struct vdpa_map_file { * @vdev: vdpa device * @idx: virtqueue index * Returns u32: group id for this virtqueue + * @get_vq_desc_group: Get the group id for the descriptor table of + * a specific virtqueue (optional) + * @vdev: vdpa device + * @idx: virtqueue index + * Returns u32: group id for the descriptor table + * portion of this virtqueue. Could be different + * from the one returned by @get_vq_group, in which case + * the access to the descriptor table can be + * confined to a separate asid, isolating it from + * the virtqueue's buffer address access. * @get_device_features: Get virtio features supported by the device * @vdev: vdpa device * Returns the virtio features supported by the @@ -242,6 +252,17 @@ struct vdpa_map_file { * @reset: Reset device * @vdev: vdpa device * Returns integer: success (0) or error (< 0) + * @compat_reset: Reset device with compatibility quirks to + * accommodate older userspace. Only needed by + * a parent driver which used to have bogus reset + * behaviour and has to maintain such behaviour + * for compatibility with older userspace. + * Historically compliant drivers only have to + * implement .reset; historically non-compliant + * drivers should implement both. + * @vdev: vdpa device + * @flags: compatibility quirks for reset + * Returns integer: success (0) or error (< 0) * @suspend: Suspend the device (optional) * @vdev: vdpa device * Returns integer: success (0) or error (< 0) @@ -317,6 +338,15 @@ struct vdpa_map_file { * @reset_map: Reset device memory mapping to the default * state (optional) * Needed for devices that are using device * specific DMA translation and prefer mapping * to be decoupled from the virtio life cycle, * i.e.
device .reset op does not reset mapping + * @vdev: vdpa device + * @asid: address space identifier + * Returns integer: success (0) or error (< 0) * @get_vq_dma_dev: Get the dma device for a specific * virtqueue (optional) * @vdev: vdpa device @@ -360,6 +390,7 @@ struct vdpa_config_ops { /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx); + u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx); u64 (*get_device_features)(struct vdpa_device *vdev); u64 (*get_backend_features)(const struct vdpa_device *vdev); int (*set_driver_features)(struct vdpa_device *vdev, u64 features); @@ -373,6 +404,8 @@ struct vdpa_config_ops { u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); int (*reset)(struct vdpa_device *vdev); + int (*compat_reset)(struct vdpa_device *vdev, u32 flags); +#define VDPA_RESET_F_CLEAN_MAP 1 int (*suspend)(struct vdpa_device *vdev); int (*resume)(struct vdpa_device *vdev); size_t (*get_config_size)(struct vdpa_device *vdev); @@ -394,6 +427,7 @@ struct vdpa_config_ops { u64 iova, u64 size, u64 pa, u32 perm, void *opaque); int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid, u64 iova, u64 size); + int (*reset_map)(struct vdpa_device *vdev, unsigned int asid); int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group, unsigned int asid); struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx); @@ -485,14 +519,17 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) return vdev->dma_dev; } -static inline int vdpa_reset(struct vdpa_device *vdev) +static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags) { const struct vdpa_config_ops *ops = vdev->config; int ret; down_write(&vdev->cf_lock); vdev->features_valid = false; - ret = ops->reset(vdev); + if (ops->compat_reset && flags) + ret = ops->compat_reset(vdev, flags); + else + ret = ops->reset(vdev); up_write(&vdev->cf_lock); return ret; } diff --git a/include/linux/verification.h b/include/linux/verification.h index f34e50ebcf60..cb2d47f28091 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -8,6 +8,7 @@ #ifndef _LINUX_VERIFICATION_H #define _LINUX_VERIFICATION_H +#include <linux/errno.h> #include <linux/types.h> /* diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 454e9295970c..89b265bc6ec3 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -69,6 +69,13 @@ struct vfio_device { u8 iommufd_attached:1; #endif u8 cdev_opened:1; +#ifdef CONFIG_DEBUG_FS + /* + * debug_root is a static property of the vfio_device + * which must be set prior to registering the vfio_device. 
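Returning to the vdpa reset split documented above: a parent driver that historically cleaned its mappings on reset keeps that behaviour behind @compat_reset while giving @reset the compliant semantics. A minimal sketch; struct my_vdpa and the two hardware helpers are hypothetical:

struct my_vdpa {
        struct vdpa_device vdpa;
        /* ... device state ... */
};

static int my_hw_reset(struct my_vdpa *v);              /* hypothetical */
static void my_clean_mappings(struct my_vdpa *v);       /* hypothetical */

static int my_vdpa_reset(struct vdpa_device *vdev)
{
        /* Compliant reset: device only, mappings are left intact. */
        return my_hw_reset(container_of(vdev, struct my_vdpa, vdpa));
}

static int my_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
{
        int err = my_vdpa_reset(vdev);

        if (!err && (flags & VDPA_RESET_F_CLEAN_MAP))
                my_clean_mappings(container_of(vdev, struct my_vdpa, vdpa));
        return err;
}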
+ */ + struct dentry *debug_root; +#endif }; /** @@ -289,16 +296,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes, /* * External user API */ -#if IS_ENABLED(CONFIG_VFIO_GROUP) struct iommu_group *vfio_file_iommu_group(struct file *file); + +#if IS_ENABLED(CONFIG_VFIO_GROUP) bool vfio_file_is_group(struct file *file); bool vfio_file_has_dev(struct file *file, struct vfio_device *device); #else -static inline struct iommu_group *vfio_file_iommu_group(struct file *file) -{ - return NULL; -} - static inline bool vfio_file_is_group(struct file *file) { return false; diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 562e8754869d..85e84b92751b 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -127,7 +127,27 @@ int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf); int vfio_pci_core_enable(struct vfio_pci_core_device *vdev); void vfio_pci_core_disable(struct vfio_pci_core_device *vdev); void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); +int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar); pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, pci_channel_state_t state); +#define VFIO_IOWRITE_DECLARATION(size) \ +int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev, \ + bool test_mem, u##size val, void __iomem *io); + +VFIO_IOWRITE_DECLARATION(8) +VFIO_IOWRITE_DECLARATION(16) +VFIO_IOWRITE_DECLARATION(32) +#ifdef iowrite64 +VFIO_IOWRITE_DECLARATION(64) +#endif + +#define VFIO_IOREAD_DECLARATION(size) \ +int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \ + bool test_mem, u##size *val, void __iomem *io); + +VFIO_IOREAD_DECLARATION(8) +VFIO_IOREAD_DECLARATION(16) +VFIO_IOREAD_DECLARATION(32) + #endif /* VFIO_PCI_CORE_H */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 1311a7fbe675..b0201747a263 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -103,6 +103,14 @@ int virtqueue_resize(struct virtqueue *vq, u32 num, int virtqueue_reset(struct virtqueue *vq, void (*recycle)(struct virtqueue *vq, void *buf)); +struct virtio_admin_cmd { + __le16 opcode; + __le16 group_type; + __le64 group_member_id; + struct scatterlist *data_sg; + struct scatterlist *result_sg; +}; + /** * struct virtio_device - representation of a device using virtio * @index: unique position on the virtio bus @@ -191,10 +199,8 @@ struct virtio_driver { void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); void (*config_changed)(struct virtio_device *dev); -#ifdef CONFIG_PM int (*freeze)(struct virtio_device *dev); int (*restore)(struct virtio_device *dev); -#endif }; static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv) diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 2b3438de2c4d..da9b271b54db 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -93,6 +93,8 @@ typedef void vq_callback_t(struct virtqueue *); * Returns 0 on success or error status * If disable_vq_and_reset is set, then enable_vq_after_reset must also be * set. + * @create_avq: create admin virtqueue resource. + * @destroy_avq: destroy admin virtqueue resource.
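For orientation, filling the new struct virtio_admin_cmd might look as follows. The opcode, group values and the submit hook are placeholders; only the structure layout comes from this header.

#include <linux/scatterlist.h>

#define MY_ADMIN_CMD_OPCODE     0x42    /* hypothetical opcode */

static int my_send_admin_cmd(struct virtio_device *vdev,
                             void *req, size_t req_len,
                             void *res, size_t res_len)
{
        struct scatterlist data_sg, result_sg;
        struct virtio_admin_cmd cmd = {
                .opcode          = cpu_to_le16(MY_ADMIN_CMD_OPCODE),
                .group_type      = cpu_to_le16(1),      /* e.g. an SR-IOV group */
                .group_member_id = cpu_to_le64(1),      /* e.g. VF number 1 */
                .data_sg         = &data_sg,
                .result_sg       = &result_sg,
        };

        sg_init_one(&data_sg, req, req_len);
        sg_init_one(&result_sg, res, res_len);

        return my_submit_admin_cmd(vdev, &cmd); /* hypothetical transport hook */
}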
*/ struct virtio_config_ops { void (*get)(struct virtio_device *vdev, unsigned offset, @@ -120,6 +122,8 @@ struct virtio_config_ops { struct virtio_shm_region *region, u8 id); int (*disable_vq_and_reset)(struct virtqueue *vq); int (*enable_vq_after_reset)(struct virtqueue *vq); + int (*create_avq)(struct virtio_device *vdev); + void (*destroy_avq)(struct virtio_device *vdev); }; /* If driver didn't advertise the feature, it will never appear. */ diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h deleted file mode 100644 index d2e2785af602..000000000000 --- a/include/linux/virtio_console.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so - * anyone can use the definitions to implement compatible drivers/servers: - * - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of IBM nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * Copyright (C) Red Hat, Inc., 2009, 2010, 2011 - * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011 - */ -#ifndef _LINUX_VIRTIO_CONSOLE_H -#define _LINUX_VIRTIO_CONSOLE_H - -#include <uapi/linux/virtio_console.h> - -int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); -#endif /* _LINUX_VIRTIO_CONSOLE_H */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 7b4dd69555e4..4dfa9b69ca8d 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -3,8 +3,10 @@ #define _LINUX_VIRTIO_NET_H #include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/udp.h> #include <uapi/linux/tcp.h> -#include <uapi/linux/udp.h> #include <uapi/linux/virtio_net.h> static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type) @@ -49,6 +51,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, const struct virtio_net_hdr *hdr, bool little_endian) { + unsigned int nh_min_len = sizeof(struct iphdr); unsigned int gso_type = 0; unsigned int thlen = 0; unsigned int p_off = 0; @@ -65,6 +68,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, gso_type = SKB_GSO_TCPV6; ip_proto = IPPROTO_TCP; thlen = sizeof(struct tcphdr); + nh_min_len = sizeof(struct ipv6hdr); break; case VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; @@ -100,7 +104,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; - p_off = skb_transport_offset(skb) + thlen; + nh_min_len = max_t(u32, nh_min_len, skb_transport_offset(skb)); + p_off = nh_min_len + thlen; if (!pskb_may_pull(skb, p_off)) return -EINVAL; } else { @@ -140,7 +145,7 @@ retry: skb_set_transport_header(skb, keys.control.thoff); } else if (gso_type) { - p_off = thlen; + p_off = nh_min_len + thlen; if (!pskb_may_pull(skb, p_off)) return -EINVAL; } @@ -151,9 +156,22 @@ retry: unsigned int nh_off = p_off; struct skb_shared_info *shinfo = skb_shinfo(skb); - /* UFO may not include transport header in gso_size. */ - if (gso_type & SKB_GSO_UDP) + switch (gso_type & ~SKB_GSO_TCP_ECN) { + case SKB_GSO_UDP: + /* UFO may not include transport header in gso_size. */ nh_off -= thlen; + break; + case SKB_GSO_UDP_L4: + if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) + return -EINVAL; + if (skb->csum_offset != offsetof(struct udphdr, check)) + return -EINVAL; + if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS) + return -EINVAL; + if (gso_type != SKB_GSO_UDP_L4) + return -EINVAL; + break; + } /* Kernel has a special handling for GSO_BY_FRAGS. 
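The new SKB_GSO_UDP_L4 branch above effectively defines what a sender must place in the virtio-net header for USO packets to pass validation. A minimal sketch using the uapi names, assuming a little-endian (VIRTIO_F_VERSION_1) header layout:

#include <linux/udp.h>
#include <linux/virtio_byteorder.h>
#include <uapi/linux/virtio_net.h>

static void my_fill_uso_hdr(struct virtio_net_hdr *hdr,
                            u16 transport_off, u16 gso_size)
{
        hdr->flags    = VIRTIO_NET_HDR_F_NEEDS_CSUM;    /* mandatory for USO */
        hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP_L4;
        hdr->gso_size = __cpu_to_virtio16(true, gso_size);
        /* csum_offset must point at the UDP checksum field, or the
         * kernel now rejects the packet with -EINVAL. */
        hdr->csum_start  = __cpu_to_virtio16(true, transport_off);
        hdr->csum_offset = __cpu_to_virtio16(true,
                                offsetof(struct udphdr, check));
}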
*/ if (gso_size == GSO_BY_FRAGS) diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h new file mode 100644 index 000000000000..f4a100a0fe2e --- /dev/null +++ b/include/linux/virtio_pci_admin.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VIRTIO_PCI_ADMIN_H +#define _LINUX_VIRTIO_PCI_ADMIN_H + +#include <linux/types.h> +#include <linux/pci.h> + +#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY +bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev); +int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset, + u8 size, u8 *buf); +int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset, + u8 size, u8 *buf); +int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset, + u8 size, u8 *buf); +int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset, + u8 size, u8 *buf); +int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev, + u8 req_bar_flags, u8 *bar, + u64 *bar_offset); +#endif + +#endif /* _LINUX_VIRTIO_PCI_ADMIN_H */ diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index 067ac1d789bc..c0b1b1ca1163 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -5,44 +5,48 @@ #include <linux/pci.h> #include <linux/virtio_pci.h> -struct virtio_pci_modern_common_cfg { - struct virtio_pci_common_cfg cfg; - - __le16 queue_notify_data; /* read-write */ - __le16 queue_reset; /* read-write */ -}; - +/** + * struct virtio_pci_modern_device - info for modern PCI virtio + * @pci_dev: Ptr to the PCI device struct + * @common: Position of the common capability in the PCI config + * @device: Device-specific data (non-legacy mode) + * @notify_base: Base of vq notifications (non-legacy mode) + * @notify_pa: Physical base of vq notifications + * @isr: Where to read and clear interrupt + * @notify_len: So we can sanity-check accesses + * @device_len: So we can sanity-check accesses + * @notify_map_cap: Capability for when we need to map notifications per-vq + * @notify_offset_multiplier: Multiply queue_notify_off by this value + * (non-legacy mode). + * @modern_bars: Bitmask of BARs + * @id: Device and vendor id + * @device_id_check: Callback defined before vp_modern_probe() to be used to + * verify the PCI device is a vendor's expected device rather + * than the standard virtio PCI device + * Returns the found device id or ERRNO + * @dma_mask: Optional mask instead of the traditional DMA_BIT_MASK(64), + * for vendor devices with DMA space address limitations + */ struct virtio_pci_modern_device { struct pci_dev *pci_dev; struct virtio_pci_common_cfg __iomem *common; - /* Device-specific data (non-legacy mode) */ void __iomem *device; - /* Base of vq notifications (non-legacy mode). */ void __iomem *notify_base; - /* Physical base of vq notifications */ resource_size_t notify_pa; - /* Where to read and clear interrupt */ u8 __iomem *isr; - /* So we can sanity-check accesses. */ size_t notify_len; size_t device_len; + size_t common_len; - /* Capability for when we need to map notifications per-vq. */ int notify_map_cap; - /* Multiply queue_notify_off by this value. (non-legacy mode). 
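The notify fields documented above combine in only one way; sketched here for reference is roughly what vp_modern_map_vq_notify() computes internally (bounds checking against notify_len omitted):

static void __iomem *my_vq_notify_addr(struct virtio_pci_modern_device *mdev,
                                       u16 queue_notify_off)
{
        /* queue_notify_off is read from the common config for the queue. */
        return mdev->notify_base +
               (u32)queue_notify_off * mdev->notify_offset_multiplier;
}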
*/ u32 notify_offset_multiplier; - int modern_bars; - struct virtio_device_id id; - /* optional check for vendor virtio device, returns dev_id or -ERRNO */ int (*device_id_check)(struct pci_dev *pdev); - - /* optional mask for devices with limited DMA space */ u64 dma_mask; }; @@ -121,4 +125,6 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev); void vp_modern_remove(struct virtio_pci_modern_device *mdev); int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index); void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index); +u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev); +u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev); #endif diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index c58453699ee9..c82089dee0c8 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -12,6 +12,7 @@ struct virtio_vsock_skb_cb { bool reply; bool tap_delivered; + u32 offset; }; #define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb)) @@ -159,6 +160,15 @@ struct virtio_transport { /* Takes ownership of the packet */ int (*send_pkt)(struct sk_buff *skb); + + /* Used in MSG_ZEROCOPY mode. Checks, that provided data + * (number of buffers) could be transmitted with zerocopy + * mode. If this callback is not implemented for the current + * transport - this means that this transport doesn't need + * extra checks and can perform zerocopy transmission by + * default. + */ + bool (*can_msgzerocopy)(int bufs_num); }; ssize_t @@ -246,4 +256,5 @@ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit); void virtio_transport_deliver_tap_pkt(struct sk_buff *skb); int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list); int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor); +int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val); #endif /* _LINUX_VIRTIO_VSOCK_H */ diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h deleted file mode 100644 index e9c0cd36c48a..000000000000 --- a/include/linux/vlynq.h +++ /dev/null @@ -1,149 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org> - */ - -#ifndef __VLYNQ_H__ -#define __VLYNQ_H__ - -#include <linux/device.h> -#include <linux/types.h> - -struct module; - -#define VLYNQ_NUM_IRQS 32 - -struct vlynq_mapping { - u32 size; - u32 offset; -}; - -enum vlynq_divisor { - vlynq_div_auto = 0, - vlynq_ldiv1, - vlynq_ldiv2, - vlynq_ldiv3, - vlynq_ldiv4, - vlynq_ldiv5, - vlynq_ldiv6, - vlynq_ldiv7, - vlynq_ldiv8, - vlynq_rdiv1, - vlynq_rdiv2, - vlynq_rdiv3, - vlynq_rdiv4, - vlynq_rdiv5, - vlynq_rdiv6, - vlynq_rdiv7, - vlynq_rdiv8, - vlynq_div_external -}; - -struct vlynq_device_id { - u32 id; - enum vlynq_divisor divisor; - unsigned long driver_data; -}; - -struct vlynq_regs; -struct vlynq_device { - u32 id, dev_id; - int local_irq; - int remote_irq; - enum vlynq_divisor divisor; - u32 regs_start, regs_end; - u32 mem_start, mem_end; - u32 irq_start, irq_end; - int irq; - int enabled; - struct vlynq_regs *local; - struct vlynq_regs *remote; - struct device dev; -}; - -struct vlynq_driver { - char *name; - struct vlynq_device_id *id_table; - int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id); - void (*remove)(struct vlynq_device *dev); - struct device_driver driver; -}; - -struct plat_vlynq_ops { - int (*on)(struct vlynq_device *dev); - void (*off)(struct vlynq_device *dev); -}; - 
-static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv) -{ - return container_of(drv, struct vlynq_driver, driver); -} - -static inline struct vlynq_device *to_vlynq_device(struct device *device) -{ - return container_of(device, struct vlynq_device, dev); -} - -extern struct bus_type vlynq_bus_type; - -extern int __vlynq_register_driver(struct vlynq_driver *driver, - struct module *owner); - -static inline int vlynq_register_driver(struct vlynq_driver *driver) -{ - return __vlynq_register_driver(driver, THIS_MODULE); -} - -static inline void *vlynq_get_drvdata(struct vlynq_device *dev) -{ - return dev_get_drvdata(&dev->dev); -} - -static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data) -{ - dev_set_drvdata(&dev->dev, data); -} - -static inline u32 vlynq_mem_start(struct vlynq_device *dev) -{ - return dev->mem_start; -} - -static inline u32 vlynq_mem_end(struct vlynq_device *dev) -{ - return dev->mem_end; -} - -static inline u32 vlynq_mem_len(struct vlynq_device *dev) -{ - return dev->mem_end - dev->mem_start + 1; -} - -static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq) -{ - int irq = dev->irq_start + virq; - if ((irq < dev->irq_start) || (irq > dev->irq_end)) - return -EINVAL; - - return irq; -} - -static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq) -{ - if ((irq < dev->irq_start) || (irq > dev->irq_end)) - return -EINVAL; - - return irq - dev->irq_start; -} - -extern void vlynq_unregister_driver(struct vlynq_driver *driver); -extern int vlynq_enable_device(struct vlynq_device *dev); -extern void vlynq_disable_device(struct vlynq_device *dev); -extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, - struct vlynq_mapping *mapping); -extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, - struct vlynq_mapping *mapping); -extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq); -extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq); - -#endif /* __VLYNQ_H__ */ diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 8abfa1240040..747943bc8cc2 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -41,9 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSTEAL_KHUGEPAGED, - PGDEMOTE_KSWAPD, - PGDEMOTE_DIRECT, - PGDEMOTE_KHUGEPAGED, PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_KHUGEPAGED, @@ -145,6 +142,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_ZSWAP ZSWPIN, ZSWPOUT, + ZSWPWB, #endif #ifdef CONFIG_X86 DIRECT_MAP_LEVEL2_SPLIT, diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index fed855bae6d8..343906a98d6e 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -556,19 +556,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec, local_irq_restore(flags); } -void __mod_lruvec_page_state(struct page *page, +void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val); -static inline void mod_lruvec_page_state(struct page *page, +static inline void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val) { unsigned long flags; local_irq_save(flags); - __mod_lruvec_page_state(page, idx, val); + __lruvec_stat_mod_folio(folio, idx, val); local_irq_restore(flags); } +static inline void mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + lruvec_stat_mod_folio(page_folio(page), idx, val); +} + #else static inline void 
__mod_lruvec_state(struct lruvec *lruvec, @@ -583,37 +589,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec, mod_node_page_state(lruvec_pgdat(lruvec), idx, val); } -static inline void __mod_lruvec_page_state(struct page *page, - enum node_stat_item idx, int val) -{ - __mod_node_page_state(page_pgdat(page), idx, val); -} - -static inline void mod_lruvec_page_state(struct page *page, +static inline void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val) { - mod_node_page_state(page_pgdat(page), idx, val); + __mod_node_page_state(folio_pgdat(folio), idx, val); } -#endif /* CONFIG_MEMCG */ - -static inline void __inc_lruvec_page_state(struct page *page, - enum node_stat_item idx) +static inline void lruvec_stat_mod_folio(struct folio *folio, + enum node_stat_item idx, int val) { - __mod_lruvec_page_state(page, idx, 1); + mod_node_page_state(folio_pgdat(folio), idx, val); } -static inline void __dec_lruvec_page_state(struct page *page, - enum node_stat_item idx) +static inline void mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) { - __mod_lruvec_page_state(page, idx, -1); + mod_node_page_state(page_pgdat(page), idx, val); } -static inline void __lruvec_stat_mod_folio(struct folio *folio, - enum node_stat_item idx, int val) -{ - __mod_lruvec_page_state(&folio->page, idx, val); -} +#endif /* CONFIG_MEMCG */ static inline void __lruvec_stat_add_folio(struct folio *folio, enum node_stat_item idx) @@ -627,24 +621,6 @@ static inline void __lruvec_stat_sub_folio(struct folio *folio, __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio)); } -static inline void inc_lruvec_page_state(struct page *page, - enum node_stat_item idx) -{ - mod_lruvec_page_state(page, idx, 1); -} - -static inline void dec_lruvec_page_state(struct page *page, - enum node_stat_item idx) -{ - mod_lruvec_page_state(page, idx, -1); -} - -static inline void lruvec_stat_mod_folio(struct folio *folio, - enum node_stat_item idx, int val) -{ - mod_lruvec_page_state(&folio->page, idx, val); -} - static inline void lruvec_stat_add_folio(struct folio *folio, enum node_stat_item idx) { diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h deleted file mode 100644 index 3495fd0dc790..000000000000 --- a/include/linux/w1-gpio.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * w1-gpio interface to platform code - * - * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi> - */ -#ifndef _LINUX_W1_GPIO_H -#define _LINUX_W1_GPIO_H - -struct gpio_desc; - -/** - * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio - */ -struct w1_gpio_platform_data { - struct gpio_desc *gpiod; - struct gpio_desc *pullup_gpiod; - void (*enable_external_pullup)(int enable); - unsigned int pullup_duration; -}; - -#endif /* _LINUX_W1_GPIO_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 5ec7739400f4..8aa3372f21a0 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -9,7 +9,6 @@ #include <linux/spinlock.h> #include <asm/current.h> -#include <uapi/linux/wait.h> typedef struct wait_queue_entry wait_queue_entry_t; @@ -19,10 +18,9 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int /* wait_queue_entry::flags */ #define WQ_FLAG_EXCLUSIVE 0x01 #define WQ_FLAG_WOKEN 0x02 -#define WQ_FLAG_BOOKMARK 0x04 -#define WQ_FLAG_CUSTOM 0x08 -#define WQ_FLAG_DONE 0x10 -#define WQ_FLAG_PRIORITY 0x20 +#define WQ_FLAG_CUSTOM 0x04 +#define WQ_FLAG_DONE 0x08 +#define WQ_FLAG_PRIORITY 0x10 /* * A single 
wait-queue entry structure: @@ -212,8 +210,6 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); -void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, - unsigned int mode, void *key, wait_queue_entry_t *bookmark); void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h index 45cd42f55d49..429c7b6afead 100644 --- a/include/linux/watch_queue.h +++ b/include/linux/watch_queue.h @@ -32,7 +32,7 @@ struct watch_filter { DECLARE_BITMAP(type_filter, WATCH_TYPE__NR); }; u32 nr_filters; /* Number of filters */ - struct watch_type_filter filters[]; + struct watch_type_filter filters[] __counted_by(nr_filters); }; struct watch_queue { diff --git a/include/linux/wmi.h b/include/linux/wmi.h index 763bd382cf2d..686291b87852 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -11,7 +11,6 @@ #include <linux/device.h> #include <linux/acpi.h> #include <linux/mod_devicetable.h> -#include <uapi/linux/wmi.h> /** * struct wmi_device - WMI device structure @@ -22,11 +21,17 @@ */ struct wmi_device { struct device dev; - - /* private: used by the WMI driver core */ bool setable; }; +/** + * to_wmi_device() - Helper macro to cast a device to a wmi_device + * @device: device struct + * + * Cast a struct device to a struct wmi_device. + */ +#define to_wmi_device(device) container_of(device, struct wmi_device, dev) + extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id, const struct acpi_buffer *in, @@ -35,9 +40,9 @@ extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev, extern union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance); -u8 wmidev_instance_count(struct wmi_device *wdev); +acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct acpi_buffer *in); -extern int set_required_buffer_size(struct wmi_device *wdev, u64 length); +u8 wmidev_instance_count(struct wmi_device *wdev); /** * struct wmi_driver - WMI driver structure @@ -47,11 +52,8 @@ extern int set_required_buffer_size(struct wmi_device *wdev, u64 length); * @probe: Callback for device binding * @remove: Callback for device unbinding * @notify: Callback for receiving WMI events - * @filter_callback: Callback for filtering device IOCTLs * * This represents WMI drivers which handle WMI devices. - * @filter_callback is only necessary for drivers which - * want to set up a WMI IOCTL interface. 
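With the ioctl plumbing gone, a WMI driver now only wires up probe/remove/notify. A minimal sketch; the GUID string and names are placeholders:

static const struct wmi_device_id my_wmi_id_table[] = {
        { "01234567-89AB-CDEF-0123-456789ABCDEF", NULL }, /* placeholder GUID */
        { }
};
MODULE_DEVICE_TABLE(wmi, my_wmi_id_table);

static void my_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
{
        if (obj && obj->type == ACPI_TYPE_INTEGER)
                dev_info(&wdev->dev, "WMI event %llu\n", obj->integer.value);
}

static struct wmi_driver my_wmi_driver = {
        .driver   = { .name = "my-wmi" },
        .id_table = my_wmi_id_table,
        .notify   = my_wmi_notify,
};
module_wmi_driver(my_wmi_driver);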
*/ struct wmi_driver { struct device_driver driver; @@ -61,8 +63,6 @@ struct wmi_driver { int (*probe)(struct wmi_device *wdev, const void *context); void (*remove)(struct wmi_device *wdev); void (*notify)(struct wmi_device *device, union acpi_object *data); - long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd, - struct wmi_ioctl_buffer *arg); }; extern int __must_check __wmi_driver_register(struct wmi_driver *driver, diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1c1d06804d45..2cc0a9606175 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -14,12 +14,7 @@ #include <linux/atomic.h> #include <linux/cpumask.h> #include <linux/rcupdate.h> - -struct workqueue_struct; - -struct work_struct; -typedef void (*work_func_t)(struct work_struct *work); -void delayed_work_timer_fn(struct timer_list *t); +#include <linux/workqueue_types.h> /* * The first word is the work queue pointer and the flags rolled into @@ -95,15 +90,6 @@ enum { #define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1) #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) -struct work_struct { - atomic_long_t data; - struct list_head entry; - work_func_t func; -#ifdef CONFIG_LOCKDEP - struct lockdep_map lockdep_map; -#endif -}; - #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) @@ -274,18 +260,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } * to generate better code. */ #ifdef CONFIG_LOCKDEP -#define __INIT_WORK(_work, _func, _onstack) \ +#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ do { \ - static struct lock_class_key __key; \ - \ __init_work((_work), _onstack); \ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ - lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \ + lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \ INIT_LIST_HEAD(&(_work)->entry); \ (_work)->func = (_func); \ } while (0) #else -#define __INIT_WORK(_work, _func, _onstack) \ +#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ do { \ __init_work((_work), _onstack); \ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ @@ -294,12 +278,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } } while (0) #endif +#define __INIT_WORK(_work, _func, _onstack) \ + do { \ + static __maybe_unused struct lock_class_key __key; \ + \ + __INIT_WORK_KEY(_work, _func, _onstack, &__key); \ + } while (0) + #define INIT_WORK(_work, _func) \ __INIT_WORK((_work), (_func), 0) #define INIT_WORK_ONSTACK(_work, _func) \ __INIT_WORK((_work), (_func), 1) +#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \ + __INIT_WORK_KEY((_work), (_func), 1, _key) + #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ do { \ INIT_WORK(&(_work)->work, (_func)); \ @@ -483,7 +477,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(void); void free_workqueue_attrs(struct workqueue_attrs *attrs); int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs); -int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); +extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask); extern bool queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work); @@ -693,8 +687,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg) return fn(arg); } #else -long work_on_cpu(int cpu, long 
(*fn)(void *), void *arg); -long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg); +long work_on_cpu_key(int cpu, long (*fn)(void *), + void *arg, struct lock_class_key *key); +/* + * A new key is defined for each caller to make sure the work + * associated with the function doesn't share its locking class. + */ +#define work_on_cpu(_cpu, _fn, _arg) \ +({ \ + static struct lock_class_key __key; \ + \ + work_on_cpu_key(_cpu, _fn, _arg, &__key); \ +}) + +long work_on_cpu_safe_key(int cpu, long (*fn)(void *), + void *arg, struct lock_class_key *key); + +/* + * A new key is defined for each caller to make sure the work + * associated with the function doesn't share its locking class. + */ +#define work_on_cpu_safe(_cpu, _fn, _arg) \ +({ \ + static struct lock_class_key __key; \ + \ + work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \ +}) #endif /* CONFIG_SMP */ #ifdef CONFIG_FREEZER diff --git a/include/linux/workqueue_types.h b/include/linux/workqueue_types.h new file mode 100644 index 000000000000..4c38824f3ab4 --- /dev/null +++ b/include/linux/workqueue_types.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_WORKQUEUE_TYPES_H +#define _LINUX_WORKQUEUE_TYPES_H + +#include <linux/atomic.h> +#include <linux/lockdep_types.h> +#include <linux/timer_types.h> +#include <linux/types.h> + +struct workqueue_struct; + +struct work_struct; +typedef void (*work_func_t)(struct work_struct *work); +void delayed_work_timer_fn(struct timer_list *t); + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +#ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +#endif +}; + +#endif /* _LINUX_WORKQUEUE_TYPES_H */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 083387c00f0c..453736fd1d23 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -60,7 +60,7 @@ struct writeback_control { unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ - unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */ + unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */ /* * When writeback IOs are bounced through async layers, only the @@ -193,7 +193,6 @@ void inode_io_list_del(struct inode *inode); /* writeback.h requires fs.h; it, too, is not included from here. */ static inline void wait_on_inode(struct inode *inode) { - might_sleep(); wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE); } diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 741703b45f61..cb571dfcf4b1 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa, * stores the index into the @id pointer, then stores the entry at * that index. A concurrent lookup will not see an uninitialised @id. * + * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set + * in xa_init_flags(). + * * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. * Return: 0 on success, -ENOMEM if memory could not be allocated or @@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, * stores the index into the @id pointer, then stores the entry at * that index. A concurrent lookup will not see an uninitialised @id. * + * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set + * in xa_init_flags(). + * * Context: Any context. 
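The XA_FLAGS_ALLOC requirement that these xa_alloc*() notes keep repeating is easy to trip over, so here is a minimal conforming sketch (names hypothetical):

static DEFINE_XARRAY_ALLOC(my_objects); /* xa_init_flags(.., XA_FLAGS_ALLOC) */

static int my_object_register(struct my_object *obj)
{
        u32 id;
        int err;

        err = xa_alloc(&my_objects, &id, obj, xa_limit_32b, GFP_KERNEL);
        if (err)
                return err;     /* -ENOMEM, or -EBUSY if the range is full */
        obj->id = id;
        return 0;
}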
Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. * Return: 0 on success, -ENOMEM if memory could not be allocated or @@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, * stores the index into the @id pointer, then stores the entry at * that index. A concurrent lookup will not see an uninitialised @id. * + * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set + * in xa_init_flags(). + * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. * Return: 0 on success, -ENOMEM if memory could not be allocated or @@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, * The search for an empty entry will start at @next and will wrap * around if necessary. * + * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set + * in xa_init_flags(). + * * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. * Return: 0 if the allocation succeeded without wrapping. 1 if the @@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, * The search for an empty entry will start at @next and will wrap * around if necessary. * + * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set + * in xa_init_flags(). + * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. * Return: 0 if the allocation succeeded without wrapping. 1 if the @@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, * The search for an empty entry will start at @next and will wrap * around if necessary. * + * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set + * in xa_init_flags(). + * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. * Return: 0 if the allocation succeeded without wrapping. 1 if the diff --git a/include/linux/zswap.h b/include/linux/zswap.h index 2a60ce39cfde..0b709f5bc65f 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -5,19 +5,41 @@ #include <linux/types.h> #include <linux/mm_types.h> +struct lruvec; + extern u64 zswap_pool_total_size; extern atomic_t zswap_stored_pages; #ifdef CONFIG_ZSWAP +struct zswap_lruvec_state { + /* + * Number of pages in zswap that should be protected from the shrinker. + * This number is an estimate of the following counts: + * + * a) Recent page faults. + * b) Recent insertion to the zswap LRU. This includes new zswap stores, + * as well as recent zswap LRU rotations. + * + * These pages are likely to be warm, and might incur IO if they are written + * to swap.
+ */ + atomic_long_t nr_zswap_protected; +}; + bool zswap_store(struct folio *folio); bool zswap_load(struct folio *folio); void zswap_invalidate(int type, pgoff_t offset); void zswap_swapon(int type); void zswap_swapoff(int type); - +void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg); +void zswap_lruvec_state_init(struct lruvec *lruvec); +void zswap_folio_swapin(struct folio *folio); +bool is_zswap_enabled(void); #else +struct zswap_lruvec_state {}; + static inline bool zswap_store(struct folio *folio) { return false; @@ -31,6 +53,14 @@ static inline bool zswap_load(struct folio *folio) static inline void zswap_invalidate(int type, pgoff_t offset) {} static inline void zswap_swapon(int type) {} static inline void zswap_swapoff(int type) {} +static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {} +static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {} +static inline void zswap_folio_swapin(struct folio *folio) {} + +static inline bool is_zswap_enabled(void) +{ + return false; +} #endif diff --git a/include/media/cec.h b/include/media/cec.h index 9c007f83569a..d77982685116 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -207,7 +207,20 @@ struct cec_adap_ops { * passthrough mode. * @log_addrs: current logical addresses * @conn_info: current connector info - * @tx_timeouts: number of transmit timeouts + * @tx_timeout_cnt: count the number of Timed Out transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_low_drive_cnt: count the number of Low Drive transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_error_cnt: count the number of Error transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_arb_lost_cnt: count the number of Arb Lost transmits. + * Reset to 0 when this is reported in cec_adap_status(). + * @tx_low_drive_log_cnt: number of logged Low Drive transmits since the + * adapter was enabled. Used to avoid flooding the kernel + * log if this happens a lot. + * @tx_error_log_cnt: number of logged Error transmits since the adapter was + * enabled. Used to avoid flooding the kernel log if this + * happens a lot. 
* @notifier: CEC notifier * @pin: CEC pin status struct * @cec_dir: debugfs cec directory @@ -262,7 +275,12 @@ struct cec_adapter { struct cec_log_addrs log_addrs; struct cec_connector_info conn_info; - u32 tx_timeouts; + u32 tx_timeout_cnt; + u32 tx_low_drive_cnt; + u32 tx_error_cnt; + u32 tx_arb_lost_cnt; + u32 tx_low_drive_log_cnt; + u32 tx_error_log_cnt; #ifdef CONFIG_CEC_NOTIFIER struct cec_notifier *notifier; @@ -275,7 +293,7 @@ struct cec_adapter { u32 sequence; - char input_phys[32]; + char input_phys[40]; }; static inline void *cec_get_drvdata(const struct cec_adapter *adap) diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h index bdc654a45521..783bda6d5cc3 100644 --- a/include/media/ipu-bridge.h +++ b/include/media/ipu-bridge.h @@ -108,7 +108,7 @@ struct ipu_node_names { char ivsc_sensor_port[7]; char ivsc_ipu_port[7]; char endpoint[11]; - char remote_port[7]; + char remote_port[9]; char vcm[16]; }; diff --git a/include/media/mipi-csi2.h b/include/media/mipi-csi2.h index c3d8f12234b1..40fc0264250d 100644 --- a/include/media/mipi-csi2.h +++ b/include/media/mipi-csi2.h @@ -19,6 +19,7 @@ #define MIPI_CSI2_DT_NULL 0x10 #define MIPI_CSI2_DT_BLANKING 0x11 #define MIPI_CSI2_DT_EMBEDDED_8B 0x12 +#define MIPI_CSI2_DT_GENERIC_LONG(n) (0x13 + (n) - 1) /* 1..4 */ #define MIPI_CSI2_DT_YUV420_8B 0x18 #define MIPI_CSI2_DT_YUV420_10B 0x19 #define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h index 0f6803e4b17e..4e96e90ee636 100644 --- a/include/media/v4l2-cci.h +++ b/include/media/v4l2-cci.h @@ -7,6 +7,8 @@ #ifndef _V4L2_CCI_H #define _V4L2_CCI_H +#include <linux/bitfield.h> +#include <linux/bits.h> #include <linux/types.h> struct i2c_client; @@ -32,12 +34,26 @@ struct cci_reg_sequence { #define CCI_REG_ADDR_MASK GENMASK(15, 0) #define CCI_REG_WIDTH_SHIFT 16 #define CCI_REG_WIDTH_MASK GENMASK(19, 16) +/* + * Private CCI register flags, for the use of drivers. + */ +#define CCI_REG_PRIVATE_SHIFT 28U +#define CCI_REG_PRIVATE_MASK GENMASK(31U, CCI_REG_PRIVATE_SHIFT) + +#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x) +#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3) +#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x) +#define CCI_REG_LE BIT(20) #define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x)) #define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x)) #define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x)) #define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x)) #define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x)) +#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x)) /** * cci_read() - Read a value from a single CCI register diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index d278836fd9cb..acf5be24a5ca 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -425,7 +425,7 @@ __v4l2_find_nearest_size(const void *array, size_t array_size, /** * v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by - * calling the g_frame_interval op of the given subdev. It only works + * calling the get_frame_interval op of the given subdev. It only works * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the * function name. 
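Back in v4l2-cci.h, the new *_LE() macros let a sensor driver declare little-endian registers once and let cci_read()/cci_write() handle the byte order. A short sketch; the register address is a placeholder:

#define MY_REG_ANALOGUE_GAIN    CCI_REG16_LE(0x0204) /* placeholder address */

static int my_sensor_set_gain(struct regmap *cci, u16 gain)
{
        int ret = 0;

        /* cci_write() accumulates errors in ret and skips the access
         * if ret is already non-zero. */
        cci_write(cci, MY_REG_ANALOGUE_GAIN, gain, &ret);
        return ret;
}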
* @@ -438,7 +438,7 @@ int v4l2_g_parm_cap(struct video_device *vdev, /** * v4l2_s_parm_cap - helper routine for vidioc_s_parm to fill this in by - * calling the s_frame_interval op of the given subdev. It only works + * calling the set_frame_interval op of the given subdev. It only works * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the * function name. * diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index e0a13505f88d..d82dfdbf6e58 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -284,7 +284,7 @@ struct video_device { struct v4l2_prio_state *prio; /* device info */ - char name[32]; + char name[64]; enum vfl_devnode_type vfl_type; enum vfl_devnode_direction vfl_dir; int minor; diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h index 8a8977a33ec1..f6f111fae33c 100644 --- a/include/media/v4l2-device.h +++ b/include/media/v4l2-device.h @@ -13,8 +13,6 @@ #include <media/v4l2-subdev.h> #include <media/v4l2-dev.h> -#define V4L2_DEVICE_NAME_SIZE (20 + 16) - struct v4l2_ctrl_handler; /** @@ -49,7 +47,7 @@ struct v4l2_device { struct media_device *mdev; struct list_head subdevs; spinlock_t lock; - char name[V4L2_DEVICE_NAME_SIZE]; + char name[36]; void (*notify)(struct v4l2_subdev *sd, unsigned int notification, void *arg); struct v4l2_ctrl_handler *ctrl_handler; diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h index 4ffa914ade3a..3a0e2588361c 100644 --- a/include/media/v4l2-event.h +++ b/include/media/v4l2-event.h @@ -78,7 +78,7 @@ struct v4l2_subscribed_event { unsigned int elems; unsigned int first; unsigned int in_use; - struct v4l2_kevent events[]; + struct v4l2_kevent events[] __counted_by(elems); }; /** diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h index b39586dfba35..ed0a44b6eada 100644 --- a/include/media/v4l2-mc.h +++ b/include/media/v4l2-mc.h @@ -143,6 +143,9 @@ int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, * v4l2_pipeline_pm_get - Increase the use count of a pipeline * @entity: The root entity of a pipeline * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * * Update the use count of all entities in the pipeline and power entities on. * * This function is intended to be called in video node open. It uses @@ -157,6 +160,9 @@ int v4l2_pipeline_pm_get(struct media_entity *entity); * v4l2_pipeline_pm_put - Decrease the use count of a pipeline * @entity: The root entity of a pipeline * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * * Update the use count of all entities in the pipeline and power entities off. * * This function is intended to be called in video node release. It uses diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index d6c8eb2b5201..7f1af1f7f912 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -84,6 +84,12 @@ struct v4l2_m2m_queue_ctx { * @last_src_buf: indicate the last source buffer for draining * @next_buf_last: next capture queued buffer will be tagged as last * @has_stopped: indicate the device has been stopped + * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE + * queue is not streaming. This allows firmware to + * analyze the bitstream header which arrives on the + * OUTPUT queue. The driver must implement the job_ready + * callback correctly to make sure that the requirements + * for actual decoding are met.
* @m2m_dev: opaque pointer to the internal data to handle M2M context * @cap_q_ctx: Capture (output to memory) queue context * @out_q_ctx: Output (input from memory) queue context @@ -106,6 +112,7 @@ struct v4l2_m2m_ctx { struct vb2_v4l2_buffer *last_src_buf; bool next_buf_last; bool has_stopped; + bool ignore_cap_streaming; /* internal use only */ struct v4l2_m2m_dev *m2m_dev; @@ -661,7 +668,7 @@ v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx); /** - * v4l2_m2m_last_src_buf() - return last destination buffer from the list of + * v4l2_m2m_last_src_buf() - return last source buffer from the list of * ready buffers * * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index d9fca929c10b..a9e6b8146279 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -446,16 +446,12 @@ enum v4l2_subdev_pre_streamon_flags { * @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the * sub-device. Failure on stop will remove any resources acquired in * streaming start, while the error code is still returned by the driver. - * Also see call_s_stream wrapper in v4l2-subdev.c. + * The caller shall track the subdev state, and shall not start or stop an + * already started or stopped subdev. Also see call_s_stream wrapper in + * v4l2-subdev.c. * * @g_pixelaspect: callback to return the pixelaspect ratio. * - * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL() - * ioctl handler code. - * - * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL() - * ioctl handler code. - * * @s_dv_timings: Set custom dv timings in the sub device. This is used * when sub device is capable of setting detailed timing information * in the hardware to generate/detect the video signal. @@ -494,10 +490,6 @@ struct v4l2_subdev_video_ops { int (*g_input_status)(struct v4l2_subdev *sd, u32 *status); int (*s_stream)(struct v4l2_subdev *sd, int enable); int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect); - int (*g_frame_interval)(struct v4l2_subdev *sd, - struct v4l2_subdev_frame_interval *interval); - int (*s_frame_interval)(struct v4l2_subdev *sd, - struct v4l2_subdev_frame_interval *interval); int (*s_dv_timings)(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings); int (*g_dv_timings)(struct v4l2_subdev *sd, @@ -686,21 +678,16 @@ struct v4l2_subdev_ir_ops { /** * struct v4l2_subdev_pad_config - Used for storing subdev pad information. * - * @try_fmt: &struct v4l2_mbus_framefmt - * @try_crop: &struct v4l2_rect to be used for crop - * @try_compose: &struct v4l2_rect to be used for compose - * - * This structure only needs to be passed to the pad op if the 'which' field - * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For - * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL. - * - * Note: This struct is also used in active state, and the 'try' prefix is - * historical and to be removed. 
+ * @format: &struct v4l2_mbus_framefmt + * @crop: &struct v4l2_rect to be used for crop + * @compose: &struct v4l2_rect to be used for compose + * @interval: frame interval */ struct v4l2_subdev_pad_config { - struct v4l2_mbus_framefmt try_fmt; - struct v4l2_rect try_crop; - struct v4l2_rect try_compose; + struct v4l2_mbus_framefmt format; + struct v4l2_rect crop; + struct v4l2_rect compose; + struct v4l2_fract interval; }; /** @@ -712,6 +699,7 @@ struct v4l2_subdev_pad_config { * @fmt: &struct v4l2_mbus_framefmt * @crop: &struct v4l2_rect to be used for crop * @compose: &struct v4l2_rect to be used for compose + * @interval: frame interval * * This structure stores configuration for a stream. */ @@ -723,6 +711,7 @@ struct v4l2_subdev_stream_config { struct v4l2_mbus_framefmt fmt; struct v4l2_rect crop; struct v4l2_rect compose; + struct v4l2_fract interval; }; /** @@ -754,6 +743,7 @@ struct v4l2_subdev_krouting { * * @_lock: default for 'lock' * @lock: mutex for the state. May be replaced by the user. + * @sd: the sub-device which the state is related to * @pads: &struct v4l2_subdev_pad_config array * @routing: routing table for the subdev * @stream_configs: stream configurations (only for V4L2_SUBDEV_FL_STREAMS) @@ -766,6 +756,7 @@ struct v4l2_subdev_state { /* lock for the struct v4l2_subdev_state fields */ struct mutex _lock; struct mutex *lock; + struct v4l2_subdev *sd; struct v4l2_subdev_pad_config *pads; struct v4l2_subdev_krouting routing; struct v4l2_subdev_stream_configs stream_configs; @@ -774,7 +765,6 @@ struct v4l2_subdev_state { /** * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations * - * @init_cfg: initialize the pad config to default values * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler * code. * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler @@ -791,6 +781,12 @@ struct v4l2_subdev_state { * * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code. * + * @get_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL() + * ioctl handler code. + * + * @set_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL() + * ioctl handler code. + * * @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code. * * @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code. @@ -822,8 +818,9 @@ struct v4l2_subdev_state { * operation shall fail if the pad index it has been called on * is not valid or in case of unrecoverable failures. * - * @set_routing: enable or disable data connection routes described in the - * subdevice routing table. + * @set_routing: Enable or disable data connection routes described in the + * subdevice routing table. Subdevs that implement this operation + * must set the V4L2_SUBDEV_FL_STREAMS flag. * * @enable_streams: Enable the streams defined in streams_mask on the given * source pad. Subdevs that implement this operation must use the active @@ -838,8 +835,6 @@ struct v4l2_subdev_state { * directly, use v4l2_subdev_disable_streams() instead. 
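Because the renamed fields above now back both the TRY and the ACTIVE case, a pad operation no longer chooses between per-file-handle and driver-private storage. A hedged sketch of a set_fmt op against the new layout; mysensor_* is an invented driver, and v4l2_subdev_state_get_format() is the accessor introduced further down in this patch:

static int mysensor_set_fmt(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	/*
	 * The caller hands in the TRY or the ACTIVE state; the op body
	 * is identical for both, which is the point of the rename.
	 */
	fmt = v4l2_subdev_state_get_format(state, format->pad);
	if (!fmt)
		return -EINVAL;

	/* A real driver would validate/adjust format->format first. */
	*fmt = format->format;

	return 0;
}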
*/ struct v4l2_subdev_pad_ops { - int (*init_cfg)(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state); int (*enum_mbus_code)(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code); @@ -861,6 +856,12 @@ struct v4l2_subdev_pad_ops { int (*set_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel); + int (*get_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *interval); + int (*set_frame_interval)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *interval); int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); int (*dv_timings_cap)(struct v4l2_subdev *sd, @@ -916,6 +917,8 @@ struct v4l2_subdev_ops { /** * struct v4l2_subdev_internal_ops - V4L2 subdev internal ops * + * @init_state: initialize the subdev state to default values + * * @registered: called when this subdev is registered. When called the v4l2_dev * field is set to the correct v4l2_device. * @@ -941,6 +944,8 @@ struct v4l2_subdev_ops { * these ops. */ struct v4l2_subdev_internal_ops { + int (*init_state)(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state); int (*registered)(struct v4l2_subdev *sd); void (*unregistered)(struct v4l2_subdev *sd); int (*open)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); @@ -948,8 +953,6 @@ struct v4l2_subdev_internal_ops { void (*release)(struct v4l2_subdev *sd); }; -#define V4L2_SUBDEV_NAME_SIZE 32 - /* Set this flag if this subdev is a i2c device. */ #define V4L2_SUBDEV_FL_IS_I2C (1U << 0) /* Set this flag if this subdev is a spi device. */ @@ -1059,7 +1062,7 @@ struct v4l2_subdev { const struct v4l2_subdev_ops *ops; const struct v4l2_subdev_internal_ops *internal_ops; struct v4l2_ctrl_handler *ctrl_handler; - char name[V4L2_SUBDEV_NAME_SIZE]; + char name[52]; u32 grp_id; void *dev_priv; void *host_priv; @@ -1141,83 +1144,6 @@ struct v4l2_subdev_fh { #define to_v4l2_subdev_fh(fh) \ container_of(fh, struct v4l2_subdev_fh, vfh) -#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) - -/** - * v4l2_subdev_get_pad_format - ancillary routine to call - * &struct v4l2_subdev_pad_config->try_fmt - * - * @sd: pointer to &struct v4l2_subdev - * @state: pointer to &struct v4l2_subdev_state - * @pad: index of the pad in the &struct v4l2_subdev_state->pads array - */ -static inline struct v4l2_mbus_framefmt * -v4l2_subdev_get_pad_format(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - unsigned int pad) -{ - if (WARN_ON(!state)) - return NULL; - if (WARN_ON(pad >= sd->entity.num_pads)) - pad = 0; - return &state->pads[pad].try_fmt; -} - -/** - * v4l2_subdev_get_pad_crop - ancillary routine to call - * &struct v4l2_subdev_pad_config->try_crop - * - * @sd: pointer to &struct v4l2_subdev - * @state: pointer to &struct v4l2_subdev_state. - * @pad: index of the pad in the &struct v4l2_subdev_state->pads array. - */ -static inline struct v4l2_rect * -v4l2_subdev_get_pad_crop(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - unsigned int pad) -{ - if (WARN_ON(!state)) - return NULL; - if (WARN_ON(pad >= sd->entity.num_pads)) - pad = 0; - return &state->pads[pad].try_crop; -} - -/** - * v4l2_subdev_get_pad_compose - ancillary routine to call - * &struct v4l2_subdev_pad_config->try_compose - * - * @sd: pointer to &struct v4l2_subdev - * @state: pointer to &struct v4l2_subdev_state. 
- * @pad: index of the pad in the &struct v4l2_subdev_state->pads array. - */ -static inline struct v4l2_rect * -v4l2_subdev_get_pad_compose(struct v4l2_subdev *sd, - struct v4l2_subdev_state *state, - unsigned int pad) -{ - if (WARN_ON(!state)) - return NULL; - if (WARN_ON(pad >= sd->entity.num_pads)) - pad = 0; - return &state->pads[pad].try_compose; -} - -/* - * Temporary helpers until uses of v4l2_subdev_get_try_* functions have been - * renamed - */ -#define v4l2_subdev_get_try_format(sd, state, pad) \ - v4l2_subdev_get_pad_format(sd, state, pad) - -#define v4l2_subdev_get_try_crop(sd, state, pad) \ - v4l2_subdev_get_pad_crop(sd, state, pad) - -#define v4l2_subdev_get_try_compose(sd, state, pad) \ - v4l2_subdev_get_pad_compose(sd, state, pad) - -#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ - extern const struct v4l2_file_operations v4l2_subdev_fops; /** @@ -1391,88 +1317,105 @@ int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, */ void v4l2_subdev_cleanup(struct v4l2_subdev *sd); -/** - * v4l2_subdev_lock_state() - Locks the subdev state - * @state: The subdevice state - * - * Locks the given subdev state. - * - * The state must be unlocked with v4l2_subdev_unlock_state() after use. +/* + * A macro to generate the macro or function name for sub-devices state access + * wrapper macros below. */ -static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state) -{ - mutex_lock(state->lock); -} +#define __v4l2_subdev_state_gen_call(NAME, _1, ARG, ...) \ + __v4l2_subdev_state_get_ ## NAME ## ARG /** - * v4l2_subdev_unlock_state() - Unlocks the subdev state - * @state: The subdevice state + * v4l2_subdev_state_get_format() - Get pointer to a stream format + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) * - * Unlocks the given subdev state. + * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad + + * stream in the subdev state. + * + * For stream-unaware drivers the format for the corresponding pad is returned. + * If the pad does not exist, NULL is returned. */ -static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state) -{ - mutex_unlock(state->lock); -} +/* + * Wrap v4l2_subdev_state_get_format(), allowing the function to be called with + * two or three arguments. The purpose of the __v4l2_subdev_state_get_format() + * macro below is to come up with the name of the function or macro to call, + * using the last two arguments (_stream and _pad). The selected function or + * macro is then called using the arguments specified by the caller. A similar + * arrangement is used for v4l2_subdev_state_crop() and + * v4l2_subdev_state_compose() below. + */ +#define v4l2_subdev_state_get_format(state, pad, ...) \ + __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \ + (state, pad, ##__VA_ARGS__) +#define __v4l2_subdev_state_get_format_pad(state, pad) \ + __v4l2_subdev_state_get_format(state, pad, 0) +struct v4l2_mbus_framefmt * +__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); /** - * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state - * is unlocked and returns it - * @sd: The subdevice + * v4l2_subdev_state_get_crop() - Get pointer to a stream crop rectangle + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) * - * Returns the active state for the subdevice, or NULL if the subdev does not - * support active state.
If the state is not NULL, calls - * lockdep_assert_not_held() to issue a warning if the state is locked. + * This returns a pointer to crop rectangle for the given pad + stream in the + * subdev state. * - * This function is to be used e.g. when getting the active state for the sole - * purpose of passing it forward, without accessing the state fields. + * For stream-unaware drivers the crop rectangle for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. */ -static inline struct v4l2_subdev_state * -v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd) -{ - if (sd->active_state) - lockdep_assert_not_held(sd->active_state->lock); - return sd->active_state; -} +#define v4l2_subdev_state_get_crop(state, pad, ...) \ + __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \ + (state, pad, ##__VA_ARGS__) +#define __v4l2_subdev_state_get_crop_pad(state, pad) \ + __v4l2_subdev_state_get_crop(state, pad, 0) +struct v4l2_rect * +__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, + u32 stream); /** - * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state - * is locked and returns it - * - * @sd: The subdevice + * v4l2_subdev_state_get_compose() - Get pointer to a stream compose rectangle + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) * - * Returns the active state for the subdevice, or NULL if the subdev does not - * support active state. If the state is not NULL, calls lockdep_assert_held() - * to issue a warning if the state is not locked. + * This returns a pointer to compose rectangle for the given pad + stream in the + * subdev state. * - * This function is to be used when the caller knows that the active state is - * already locked. + * For stream-unaware drivers the compose rectangle for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. */ -static inline struct v4l2_subdev_state * -v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd) -{ - if (sd->active_state) - lockdep_assert_held(sd->active_state->lock); - return sd->active_state; -} +#define v4l2_subdev_state_get_compose(state, pad, ...) \ + __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \ + (state, pad, ##__VA_ARGS__) +#define __v4l2_subdev_state_get_compose_pad(state, pad) \ + __v4l2_subdev_state_get_compose(state, pad, 0) +struct v4l2_rect * +__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); /** - * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev - * state for the subdevice - * @sd: The subdevice + * v4l2_subdev_state_get_interval() - Get pointer to a stream frame interval + * @state: subdevice state + * @pad: pad id + * @...: stream id (optional argument) * - * Returns the locked active state for the subdevice, or NULL if the subdev - * does not support active state. + * This returns a pointer to the frame interval for the given pad + stream in + * the subdev state. * - * The state must be unlocked with v4l2_subdev_unlock_state() after use. + * For stream-unaware drivers the frame interval for the corresponding pad is + * returned. If the pad does not exist, NULL is returned. */ -static inline struct v4l2_subdev_state * -v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd) -{ - if (sd->active_state) - v4l2_subdev_lock_state(sd->active_state); - return sd->active_state; -} +#define v4l2_subdev_state_get_interval(state, pad, ...) 
\ + __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \ + (state, pad, ##__VA_ARGS__) +#define __v4l2_subdev_state_get_interval_pad(state, pad) \ + __v4l2_subdev_state_get_interval(state, pad, 0) +struct v4l2_fract * +__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, + unsigned int pad, u32 stream); #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) @@ -1494,6 +1437,24 @@ int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format); /** + * v4l2_subdev_get_frame_interval() - Fill frame interval based on state + * @sd: subdevice + * @state: subdevice state + * @fi: pointer to &struct v4l2_subdev_frame_interval + * + * Fill @fi->interval field based on the information in the @fi struct. + * + * This function can be used by the subdev drivers which support active state to + * implement v4l2_subdev_pad_ops.get_frame_interval if the subdev driver does + * not need to do anything special in their get_frame_interval op. + * + * Returns 0 on success, error value otherwise. + */ +int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval *fi); + +/** * v4l2_subdev_set_routing() - Set given routing to subdev state * @sd: The subdevice * @state: The subdevice state @@ -1539,52 +1500,6 @@ int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, const struct v4l2_mbus_framefmt *fmt); /** - * v4l2_subdev_state_get_stream_format() - Get pointer to a stream format - * @state: subdevice state - * @pad: pad id - * @stream: stream id - * - * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad + - * stream in the subdev state. - * - * If the state does not contain the given pad + stream, NULL is returned. - */ -struct v4l2_mbus_framefmt * -v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state, - unsigned int pad, u32 stream); - -/** - * v4l2_subdev_state_get_stream_crop() - Get pointer to a stream crop rectangle - * @state: subdevice state - * @pad: pad id - * @stream: stream id - * - * This returns a pointer to crop rectangle for the given pad + stream in the - * subdev state. - * - * If the state does not contain the given pad + stream, NULL is returned. - */ -struct v4l2_rect * -v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state, - unsigned int pad, u32 stream); - -/** - * v4l2_subdev_state_get_stream_compose() - Get pointer to a stream compose - * rectangle - * @state: subdevice state - * @pad: pad id - * @stream: stream id - * - * This returns a pointer to compose rectangle for the given pad + stream in the - * subdev state. - * - * If the state does not contain the given pad + stream, NULL is returned. - */ -struct v4l2_rect * -v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state, - unsigned int pad, u32 stream); - -/** * v4l2_subdev_routing_find_opposite_end() - Find the opposite stream * @routing: routing used to find the opposite side * @pad: pad id @@ -1786,6 +1701,89 @@ int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable); #endif /* CONFIG_MEDIA_CONTROLLER */ /** + * v4l2_subdev_lock_state() - Locks the subdev state + * @state: The subdevice state + * + * Locks the given subdev state. + * + * The state must be unlocked with v4l2_subdev_unlock_state() after use. 
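Combining the v4l2_subdev_state_get_interval() accessor and the v4l2_subdev_get_frame_interval() helper above, a driver that keeps the frame interval purely in the state can wire the new pad ops up as in this speculative sketch (mysensor_* invented as before):

static int mysensor_set_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       struct v4l2_subdev_frame_interval *fi)
{
	struct v4l2_fract *interval;

	interval = v4l2_subdev_state_get_interval(state, fi->pad);
	if (!interval)
		return -EINVAL;

	/* A real driver would clamp fi->interval to hardware limits. */
	*interval = fi->interval;

	return 0;
}

static const struct v4l2_subdev_pad_ops mysensor_pad_ops = {
	/* The generic helper reads the interval back from the state. */
	.get_frame_interval = v4l2_subdev_get_frame_interval,
	.set_frame_interval = mysensor_set_frame_interval,
};

Code outside the pad-op path would bracket such accesses with v4l2_subdev_lock_and_get_active_state() and v4l2_subdev_unlock_state(), the helpers whose relocation follows.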
+ */ +static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state) +{ + mutex_lock(state->lock); +} + +/** + * v4l2_subdev_unlock_state() - Unlocks the subdev state + * @state: The subdevice state + * + * Unlocks the given subdev state. + */ +static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state) +{ + mutex_unlock(state->lock); +} + +/** + * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state + * is unlocked and returns it + * @sd: The subdevice + * + * Returns the active state for the subdevice, or NULL if the subdev does not + * support active state. If the state is not NULL, calls + * lockdep_assert_not_held() to issue a warning if the state is locked. + * + * This function is to be used e.g. when getting the active state for the sole + * purpose of passing it forward, without accessing the state fields. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + lockdep_assert_not_held(sd->active_state->lock); + return sd->active_state; +} + +/** + * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state + * is locked and returns it + * + * @sd: The subdevice + * + * Returns the active state for the subdevice, or NULL if the subdev does not + * support active state. If the state is not NULL, calls lockdep_assert_held() + * to issue a warning if the state is not locked. + * + * This function is to be used when the caller knows that the active state is + * already locked. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + lockdep_assert_held(sd->active_state->lock); + return sd->active_state; +} + +/** + * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev + * state for the subdevice + * @sd: The subdevice + * + * Returns the locked active state for the subdevice, or NULL if the subdev + * does not support active state. + * + * The state must be unlocked with v4l2_subdev_unlock_state() after use. + */ +static inline struct v4l2_subdev_state * +v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd) +{ + if (sd->active_state) + v4l2_subdev_lock_state(sd->active_state); + return sd->active_state; +} + +/** * v4l2_subdev_init - initializes the sub-device struct * * @sd: pointer to the &struct v4l2_subdev to be initialized diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h deleted file mode 100644 index 2e01b2e9a1c0..000000000000 --- a/include/media/videobuf-core.h +++ /dev/null @@ -1,233 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * generic helper functions for handling video4linux capture buffers - * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> - * - * Highly based on video-buf written originally by: - * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> - * (c) 2006 Ted Walther and John Sokol - */ - -#ifndef _VIDEOBUF_CORE_H -#define _VIDEOBUF_CORE_H - -#include <linux/poll.h> -#include <linux/videodev2.h> - -#define UNSET (-1U) - - -struct videobuf_buffer; -struct videobuf_queue; - -/* --------------------------------------------------------------------- */ - -/* - * A small set of helper functions to manage video4linux buffers. 
- * - * struct videobuf_buffer holds the data structures used by the helper - * functions, additionally some commonly used fields for v4l buffers - * (width, height, lists, waitqueue) are in there. That struct should - * be used as first element in the drivers buffer struct. - * - * about the mmap helpers (videobuf_mmap_*): - * - * The mmaper function allows to map any subset of contiguous buffers. - * This includes one mmap() call for all buffers (which the original - * video4linux API uses) as well as one mmap() for every single buffer - * (which v4l2 uses). - * - * If there is a valid mapping for a buffer, buffer->baddr/bsize holds - * userspace address + size which can be fed into the - * videobuf_dma_init_user function listed above. - * - */ - -struct videobuf_mapping { - unsigned int count; - struct videobuf_queue *q; -}; - -enum videobuf_state { - VIDEOBUF_NEEDS_INIT = 0, - VIDEOBUF_PREPARED = 1, - VIDEOBUF_QUEUED = 2, - VIDEOBUF_ACTIVE = 3, - VIDEOBUF_DONE = 4, - VIDEOBUF_ERROR = 5, - VIDEOBUF_IDLE = 6, -}; - -struct videobuf_buffer { - unsigned int i; - u32 magic; - - /* info about the buffer */ - unsigned int width; - unsigned int height; - unsigned int bytesperline; /* use only if != 0 */ - unsigned long size; - enum v4l2_field field; - enum videobuf_state state; - struct list_head stream; /* QBUF/DQBUF list */ - - /* touched by irq handler */ - struct list_head queue; - wait_queue_head_t done; - unsigned int field_count; - u64 ts; - - /* Memory type */ - enum v4l2_memory memory; - - /* buffer size */ - size_t bsize; - - /* buffer offset (mmap + overlay) */ - size_t boff; - - /* buffer addr (userland ptr!) */ - unsigned long baddr; - - /* for mmap'ed buffers */ - struct videobuf_mapping *map; - - /* Private pointer to allow specific methods to store their data */ - int privsize; - void *priv; -}; - -struct videobuf_queue_ops { - int (*buf_setup)(struct videobuf_queue *q, - unsigned int *count, unsigned int *size); - int (*buf_prepare)(struct videobuf_queue *q, - struct videobuf_buffer *vb, - enum v4l2_field field); - void (*buf_queue)(struct videobuf_queue *q, - struct videobuf_buffer *vb); - void (*buf_release)(struct videobuf_queue *q, - struct videobuf_buffer *vb); -}; - -#define MAGIC_QTYPE_OPS 0x12261003 - -/* Helper operations - device type dependent */ -struct videobuf_qtype_ops { - u32 magic; - - struct videobuf_buffer *(*alloc_vb)(size_t size); - void *(*vaddr) (struct videobuf_buffer *buf); - int (*iolock) (struct videobuf_queue *q, - struct videobuf_buffer *vb, - struct v4l2_framebuffer *fbuf); - int (*sync) (struct videobuf_queue *q, - struct videobuf_buffer *buf); - int (*mmap_mapper) (struct videobuf_queue *q, - struct videobuf_buffer *buf, - struct vm_area_struct *vma); -}; - -struct videobuf_queue { - struct mutex vb_lock; - struct mutex *ext_lock; - spinlock_t *irqlock; - struct device *dev; - - wait_queue_head_t wait; /* wait if queue is empty */ - - enum v4l2_buf_type type; - unsigned int msize; - enum v4l2_field field; - enum v4l2_field last; /* for field=V4L2_FIELD_ALTERNATE */ - struct videobuf_buffer *bufs[VIDEO_MAX_FRAME]; - const struct videobuf_queue_ops *ops; - struct videobuf_qtype_ops *int_ops; - - unsigned int streaming:1; - unsigned int reading:1; - - /* capture via mmap() + ioctl(QBUF/DQBUF) */ - struct list_head stream; - - /* capture via read() */ - unsigned int read_off; - struct videobuf_buffer *read_buf; - - /* driver private data */ - void *priv_data; -}; - -static inline void videobuf_queue_lock(struct videobuf_queue *q) -{ - if 
(!q->ext_lock) - mutex_lock(&q->vb_lock); -} - -static inline void videobuf_queue_unlock(struct videobuf_queue *q) -{ - if (!q->ext_lock) - mutex_unlock(&q->vb_lock); -} - -int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb, - int non_blocking, int intr); -int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb, - struct v4l2_framebuffer *fbuf); - -struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q); - -/* Used on videobuf-dvb */ -void *videobuf_queue_to_vaddr(struct videobuf_queue *q, - struct videobuf_buffer *buf); - -void videobuf_queue_core_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct videobuf_qtype_ops *int_ops, - struct mutex *ext_lock); -int videobuf_queue_is_busy(struct videobuf_queue *q); -void videobuf_queue_cancel(struct videobuf_queue *q); - -enum v4l2_field videobuf_next_field(struct videobuf_queue *q); -int videobuf_reqbufs(struct videobuf_queue *q, - struct v4l2_requestbuffers *req); -int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b); -int videobuf_qbuf(struct videobuf_queue *q, - struct v4l2_buffer *b); -int videobuf_dqbuf(struct videobuf_queue *q, - struct v4l2_buffer *b, int nonblocking); -int videobuf_streamon(struct videobuf_queue *q); -int videobuf_streamoff(struct videobuf_queue *q); - -void videobuf_stop(struct videobuf_queue *q); - -int videobuf_read_start(struct videobuf_queue *q); -void videobuf_read_stop(struct videobuf_queue *q); -ssize_t videobuf_read_stream(struct videobuf_queue *q, - char __user *data, size_t count, loff_t *ppos, - int vbihack, int nonblocking); -ssize_t videobuf_read_one(struct videobuf_queue *q, - char __user *data, size_t count, loff_t *ppos, - int nonblocking); -__poll_t videobuf_poll_stream(struct file *file, - struct videobuf_queue *q, - poll_table *wait); - -int videobuf_mmap_setup(struct videobuf_queue *q, - unsigned int bcount, unsigned int bsize, - enum v4l2_memory memory); -int __videobuf_mmap_setup(struct videobuf_queue *q, - unsigned int bcount, unsigned int bsize, - enum v4l2_memory memory); -int videobuf_mmap_free(struct videobuf_queue *q); -int videobuf_mmap_mapper(struct videobuf_queue *q, - struct vm_area_struct *vma); - -#endif diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h deleted file mode 100644 index 525883b2c53e..000000000000 --- a/include/media/videobuf-dma-contig.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * helper functions for physically contiguous capture buffers - * - * The functions support hardware lacking scatter gather support - * (i.e. 
the buffers must be linear in physical memory) - * - * Copyright (c) 2008 Magnus Damm - */ -#ifndef _VIDEOBUF_DMA_CONTIG_H -#define _VIDEOBUF_DMA_CONTIG_H - -#include <linux/dma-mapping.h> -#include <media/videobuf-core.h> - -void videobuf_queue_dma_contig_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct mutex *ext_lock); - -dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf); -void videobuf_dma_contig_free(struct videobuf_queue *q, - struct videobuf_buffer *buf); - -#endif /* _VIDEOBUF_DMA_CONTIG_H */ diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h deleted file mode 100644 index 930ff8d454fc..000000000000 --- a/include/media/videobuf-dma-sg.h +++ /dev/null @@ -1,102 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * helper functions for SG DMA video4linux capture buffers - * - * The functions expect the hardware being able to scatter gather - * (i.e. the buffers are not linear in physical memory, but fragmented - * into PAGE_SIZE chunks). They also assume the driver does not need - * to touch the video data. - * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> - * - * Highly based on video-buf written originally by: - * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> - * (c) 2006 Ted Walther and John Sokol - */ -#ifndef _VIDEOBUF_DMA_SG_H -#define _VIDEOBUF_DMA_SG_H - -#include <media/videobuf-core.h> - -/* --------------------------------------------------------------------- */ - -/* - * A small set of helper functions to manage buffers (both userland - * and kernel) for DMA. - * - * videobuf_dma_init_*() - * creates a buffer. The userland version takes a userspace - * pointer + length. The kernel version just wants the size and - * does memory allocation too using vmalloc_32(). - * - * videobuf_dma_*() - * see Documentation/core-api/dma-api-howto.rst, these functions to - * basically the same. The map function does also build a - * scatterlist for the buffer (and unmap frees it ...) - * - * videobuf_dma_free() - * no comment ... - * - */ - -struct videobuf_dmabuf { - u32 magic; - - /* for userland buffer */ - int offset; - size_t size; - struct page **pages; - - /* for kernel buffers */ - void *vaddr; - struct page **vaddr_pages; - dma_addr_t *dma_addr; - struct device *dev; - - /* for overlay buffers (pci-pci dma) */ - dma_addr_t bus_addr; - - /* common */ - struct scatterlist *sglist; - int sglen; - unsigned long nr_pages; - int direction; -}; - -struct videobuf_dma_sg_memory { - u32 magic; - - /* for mmap'ed buffers */ - struct videobuf_dmabuf dma; -}; - -/* - * Scatter-gather DMA buffer API. - * - * These functions provide a simple way to create a page list and a - * scatter-gather list from a kernel, userspace of physical address and map the - * memory for DMA operation. - * - * Despite the name, this is totally unrelated to videobuf, except that - * videobuf-dma-sg uses the same API internally. 
- */ -int videobuf_dma_free(struct videobuf_dmabuf *dma); - -int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma); -struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf); - -void *videobuf_sg_alloc(size_t size); - -void videobuf_queue_sg_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct mutex *ext_lock); - -#endif /* _VIDEOBUF_DMA_SG_H */ - diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h deleted file mode 100644 index e930dbb9d7f4..000000000000 --- a/include/media/videobuf-vmalloc.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * helper functions for vmalloc capture buffers - * - * The functions expect the hardware being able to scatter gather - * (i.e. the buffers are not linear in physical memory, but fragmented - * into PAGE_SIZE chunks). They also assume the driver does not need - * to touch the video data. - * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> - */ -#ifndef _VIDEOBUF_VMALLOC_H -#define _VIDEOBUF_VMALLOC_H - -#include <media/videobuf-core.h> - -/* --------------------------------------------------------------------- */ - -struct videobuf_vmalloc_memory { - u32 magic; - - void *vaddr; - - /* remap_vmalloc_range seems to need to run - * after mmap() on some cases */ - struct vm_area_struct *vma; -}; - -void videobuf_queue_vmalloc_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct mutex *ext_lock); - -void *videobuf_to_vmalloc(struct videobuf_buffer *buf); - -void videobuf_vmalloc_free(struct videobuf_buffer *buf); - -#endif diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 4b6a9d2ea372..56719a26a46c 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -402,7 +402,7 @@ struct vb2_buffer { * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED. * If you need a minimum number of buffers before you can * start streaming, then set - * &vb2_queue->min_buffers_needed. If that is non-zero + * &vb2_queue->min_queued_buffers. If that is non-zero * then @start_streaming won't be called until at least * that many buffers have been queued up by userspace. * @stop_streaming: called when 'streaming' state must be disabled; driver @@ -546,10 +546,13 @@ struct vb2_buf_ops { * @gfp_flags: additional gfp flags used when allocating the buffers. * Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32 * to force the buffer allocation to a specific memory zone. - * @min_buffers_needed: the minimum number of buffers needed before + * @min_queued_buffers: the minimum number of queued buffers needed before * @start_streaming can be called. Used when a DMA engine * cannot be started unless at least this number of buffers * have been queued into the driver. + * VIDIOC_REQBUFS will ensure at least @min_queued_buffers + * buffers will be allocated. Note that VIDIOC_CREATE_BUFS will not + * modify the requested buffer count. */ /* * Private elements (won't appear at the uAPI book): @@ -558,6 +561,9 @@ struct vb2_buf_ops { * @dma_dir: DMA mapping direction. 
* @bufs: videobuf2 buffer structures * @num_buffers: number of allocated/used buffers + * @max_num_buffers: upper limit of number of allocated/used buffers. + * If set to 0 v4l2 core will change it to VB2_MAX_FRAME + * for backward compatibility. * @queued_list: list of buffers currently queued from userspace * @queued_count: number of buffers queued and ready for streaming. * @owned_by_drv_count: number of buffers owned by the driver @@ -611,7 +617,7 @@ struct vb2_queue { unsigned int buf_struct_size; u32 timestamp_flags; gfp_t gfp_flags; - u32 min_buffers_needed; + u32 min_queued_buffers; struct device *alloc_devs[VB2_MAX_PLANES]; @@ -619,8 +625,9 @@ struct vb2_queue { struct mutex mmap_lock; unsigned int memory; enum dma_data_direction dma_dir; - struct vb2_buffer *bufs[VB2_MAX_FRAME]; + struct vb2_buffer **bufs; unsigned int num_buffers; + unsigned int max_num_buffers; struct list_head queued_list; unsigned int queued_count; @@ -747,7 +754,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q); /** * vb2_core_querybuf() - query video buffer information. * @q: pointer to &struct vb2_queue with videobuf2 queue. - * @index: id number of the buffer. + * @vb: pointer to struct &vb2_buffer. * @pb: buffer struct passed from userspace. * * Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called @@ -759,7 +766,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q); * * Return: returns zero on success; an error code otherwise. */ -void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); +void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb); /** * vb2_core_reqbufs() - Initiate streaming. @@ -823,7 +830,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace * to the kernel. * @q: pointer to &struct vb2_queue with videobuf2 queue. - * @index: id number of the buffer. + * @vb: pointer to struct &vb2_buffer. * @pb: buffer structure passed from userspace to * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. * @@ -839,13 +846,13 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, * * Return: returns zero on success; an error code otherwise. */ -int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb); +int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb); /** * vb2_core_qbuf() - Queue a buffer from userspace * * @q: pointer to &struct vb2_queue with videobuf2 queue. - * @index: id number of the buffer + * @vb: pointer to struct &vb2_buffer. * @pb: buffer structure passed from userspace to * v4l2_ioctl_ops->vidioc_qbuf handler in driver * @req: pointer to &struct media_request, may be NULL. @@ -867,7 +874,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb); * * Return: returns zero on success; an error code otherwise. */ -int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, +int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb, struct media_request *req); /** @@ -931,7 +938,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); * @fd: pointer to the file descriptor associated with DMABUF * (set by driver). * @type: buffer type. - * @index: id number of the buffer. + * @vb: pointer to struct &vb2_buffer. * @plane: index of the plane to be exported, 0 for single plane queues * @flags: file flags for newly created file, as defined at * include/uapi/asm-generic/fcntl.h.
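For illustration, this is roughly how a capture driver fills the renamed field during queue setup; the mycap_* names are invented, the vb2 symbols are real. With @bufs now allocated dynamically, the vb2_get_buffer() checks added below return NULL for any out-of-range index instead of trusting a fixed-size array:

static int mycap_init_vb2_queue(struct vb2_queue *q, void *drv_priv,
				struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = drv_priv;
	q->buf_struct_size = sizeof(struct mycap_buffer);	/* invented */
	q->ops = &mycap_vb2_ops;				/* invented */
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = lock;
	/* Renamed from min_buffers_needed: start_streaming() will not be
	 * called until at least this many buffers are queued. */
	q->min_queued_buffers = 2;

	return vb2_queue_init(q);
}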
@@ -945,7 +952,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); * Return: returns zero on success; an error code otherwise. */ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, - unsigned int index, unsigned int plane, unsigned int flags); + struct vb2_buffer *vb, unsigned int plane, unsigned int flags); /** * vb2_core_queue_init() - initialize a videobuf2 queue @@ -1140,6 +1147,15 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q) } /** + * vb2_get_num_buffers() - get the number of buffers in a queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + */ +static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q) +{ + return q->num_buffers; +} + +/** * vb2_is_busy() - return busy status of the queue. * @q: pointer to &struct vb2_queue with videobuf2 queue. * @@ -1147,7 +1163,7 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q) */ static inline bool vb2_is_busy(struct vb2_queue *q) { - return (q->num_buffers > 0); + return vb2_get_num_buffers(q) > 0; } /** @@ -1239,6 +1255,12 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q, unsigned int index) { + if (!q->bufs) + return NULL; + + if (index >= q->max_num_buffers) + return NULL; + + if (index < q->num_buffers) return q->bufs[index]; return NULL; diff --git a/include/net/Space.h b/include/net/Space.h index c29f3d51c078..ef42629f4258 100644 --- a/include/net/Space.h +++ b/include/net/Space.h @@ -10,4 +10,3 @@ struct net_device *smc_init(int unit); struct net_device *cs89x0_probe(int unit); struct net_device *tc515_probe(int unit); struct net_device *lance_probe(int unit); -struct net_device *cops_probe(int unit); diff --git a/include/net/act_api.h b/include/net/act_api.h index 4ae0580b63ca..e1e5e72b901e 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -137,6 +137,7 @@ struct tc_action_ops { #ifdef CONFIG_NET_CLS_ACT +#define ACT_P_BOUND 0 #define ACT_P_CREATED 1 #define ACT_P_DELETED 1 @@ -191,7 +192,7 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, int bind, u32 flags); -void tcf_idr_insert_many(struct tc_action *actions[]); +void tcf_idr_insert_many(struct tc_action *actions[], int init_res[]); void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, struct tc_action **a, int bind); @@ -207,8 +208,7 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, struct tc_action *actions[], int init_res[], size_t *attr_size, u32 flags, u32 fl_flags, struct netlink_ext_ack *extack); -struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police, - bool rtnl_held, +struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags, struct netlink_ext_ack *extack); struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 82da55101b5a..61ebe723ee4d 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -31,17 +31,22 @@ struct prefix_info { __u8 length; __u8 prefix_len; + union __packed { + __u8 flags; + struct __packed { #if defined(__BIG_ENDIAN_BITFIELD) - __u8 onlink : 1, + __u8 onlink : 1, autoconf : 1, reserved : 6; #elif defined(__LITTLE_ENDIAN_BITFIELD) - __u8 reserved : 6, + __u8 reserved : 6, autoconf :
1, onlink : 1; #else #error "Please fix <asm/byteorder.h>" #endif + }; + }; __be32 valid; __be32 prefered; __be32 reserved2; @@ -49,6 +54,9 @@ struct prefix_info { struct in6_addr prefix; }; +/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */ +static_assert(sizeof(struct prefix_info) == 32); + #include <linux/ipv6.h> #include <linux/netdevice.h> #include <net/if_inet6.h> diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 5531dd08061e..0754c463224a 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -15,6 +15,7 @@ struct key; struct sock; struct socket; struct rxrpc_call; +struct rxrpc_peer; enum rxrpc_abort_reason; enum rxrpc_interruptibility { @@ -41,13 +42,14 @@ void rxrpc_kernel_new_call_notification(struct socket *, rxrpc_notify_new_call_t, rxrpc_discard_new_call_t); struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, - struct sockaddr_rxrpc *srx, + struct rxrpc_peer *peer, struct key *key, unsigned long user_call_ID, s64 tx_total_len, u32 hard_timeout, gfp_t gfp, rxrpc_notify_rx_t notify_rx, + u16 service_id, bool upgrade, enum rxrpc_interruptibility interruptibility, unsigned int debug_id); @@ -60,9 +62,14 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, u32, int, enum rxrpc_abort_reason); void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call); void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call); -void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, - struct sockaddr_rxrpc *); -bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *); +struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock, + struct sockaddr_rxrpc *srx, gfp_t gfp); +void rxrpc_kernel_put_peer(struct rxrpc_peer *peer); +struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer); +struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call); +const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer); +const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer); +unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 824c258143a3..afd40dce40f3 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -46,12 +46,6 @@ struct scm_stat { #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) -#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) -#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) -#define unix_state_lock_nested(s) \ - spin_lock_nested(&unix_sk(s)->lock, \ - SINGLE_DEPTH_NESTING) - /* The AF_UNIX socket */ struct unix_sock { /* WARNING: sk has to be the first member */ @@ -75,6 +69,21 @@ struct unix_sock { }; #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk) +#define unix_peer(sk) (unix_sk(sk)->peer) + +#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) +#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) +enum unix_socket_lock_class { + U_LOCK_NORMAL, + U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */ + U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). 
*/ +}; + +static inline void unix_state_lock_nested(struct sock *sk, + enum unix_socket_lock_class subclass) +{ + spin_lock_nested(&unix_sk(sk)->lock, subclass); +} #define peer_wait peer_wq.wait diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index b01cf9ac2437..535701efc1e5 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -137,7 +137,6 @@ struct vsock_transport { u64 (*stream_rcvhiwat)(struct vsock_sock *); bool (*stream_is_active)(struct vsock_sock *); bool (*stream_allow)(u32 cid, u32 port); - int (*set_rcvlowat)(struct vsock_sock *vsk, int val); /* SEQ_PACKET. */ ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg, @@ -168,6 +167,7 @@ struct vsock_transport { struct vsock_transport_send_notify_data *); /* sk_lock held by the caller */ void (*notify_buffer_size)(struct vsock_sock *, u64 *); + int (*notify_set_rcvlowat)(struct vsock_sock *vsk, int val); /* Shutdown. */ int (*shutdown)(struct vsock_sock *, int); @@ -177,6 +177,9 @@ struct vsock_transport { /* Read a single skb */ int (*read_skb)(struct vsock_sock *, skb_read_actor_t); + + /* Zero-copy. */ + bool (*msgzerocopy_allow)(void); }; /**** CORE ****/ @@ -241,4 +244,8 @@ static inline void __init vsock_bpf_build_proto(void) {} #endif +static inline bool vsock_msgzerocopy_allow(const struct vsock_transport *t) +{ + return t->msgzerocopy_allow && t->msgzerocopy_allow(); +} #endif /* __AF_VSOCK_H__ */ diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index aa90adc3b2a4..7ffa8c192c3f 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -541,7 +541,7 @@ static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk, return ERR_PTR(-EFAULT); } - skb->priority = sk->sk_priority; + skb->priority = READ_ONCE(sk->sk_priority); return skb; } diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 87d92accc26e..bdee5d649cc6 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -673,6 +674,8 @@ enum { #define HCI_TX_POWER_INVALID 127 #define HCI_RSSI_INVALID 127 +#define HCI_SYNC_HANDLE_INVALID 0xffff + #define HCI_ROLE_MASTER 0x00 #define HCI_ROLE_SLAVE 0x01 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index e6359f7346f1..8f8dd9173714 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -189,6 +189,7 @@ struct blocked_key { struct smp_csrk { bdaddr_t bdaddr; u8 bdaddr_type; + u8 link_type; u8 type; u8 val[16]; }; @@ -198,6 +199,7 @@ struct smp_ltk { struct rcu_head rcu; bdaddr_t bdaddr; u8 bdaddr_type; + u8 link_type; u8 authenticated; u8 type; u8 enc_size; @@ -212,6 +214,7 @@ struct smp_irk { bdaddr_t rpa; bdaddr_t bdaddr; u8 addr_type; + u8 link_type; u8 val[16]; }; @@ -219,6 +222,8 @@ struct link_key { struct list_head list; struct rcu_head rcu; bdaddr_t bdaddr; + u8 bdaddr_type; + u8 link_type; u8 type; u8 val[HCI_LINK_KEY_SIZE]; u8 pin_len; @@ -350,7 +355,9 @@ struct hci_dev { struct list_head list; struct mutex lock; - char name[8]; + struct ida unset_handle_ida; + + const char *name; unsigned long flags; __u16 id; __u8 bus; @@ -532,7 +539,6 @@ struct hci_dev { struct work_struct tx_work; struct delayed_work le_scan_disable; - struct delayed_work le_scan_restart; struct sk_buff_head rx_q; struct sk_buff_head raw_q; @@ 
-950,7 +956,6 @@ void hci_inquiry_cache_flush(struct hci_dev *hdev); /* ----- HCI Connections ----- */ enum { HCI_CONN_AUTH_PEND, - HCI_CONN_REAUTH_PEND, HCI_CONN_ENCRYPT_PEND, HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, @@ -1225,11 +1230,11 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, continue; /* Match CIG ID if set */ - if (cig != BT_ISO_QOS_CIG_UNSET && cig != c->iso_qos.ucast.cig) + if (cig != c->iso_qos.ucast.cig) continue; /* Match CIS ID if set */ - if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis) + if (id != c->iso_qos.ucast.cis) continue; /* Match destination address if set */ @@ -1290,8 +1295,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, return NULL; } -static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *hdev, - __u8 handle) +static inline struct hci_conn * +hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; @@ -1299,10 +1304,11 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev * rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK) + if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK || + c->state != state) continue; - if (handle != BT_ISO_QOS_BIG_UNSET && handle == c->iso_qos.bcast.big) { + if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } @@ -1314,7 +1320,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev * } static inline struct hci_conn * -hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big) +hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; @@ -1336,6 +1342,29 @@ hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big) return NULL; } +static inline struct hci_conn * +hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK || + !test_bit(HCI_CONN_PA_SYNC, &c->flags)) + continue; + + if (c->sync_handle == sync_handle) { + rcu_read_unlock(); + return c; + } + } + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { @@ -1377,6 +1406,26 @@ static inline void hci_conn_hash_list_state(struct hci_dev *hdev, rcu_read_unlock(); } +static inline void hci_conn_hash_list_flag(struct hci_dev *hdev, + hci_conn_func_t func, __u8 type, + __u8 flag, void *data) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + if (!func) + return; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == type && test_bit(flag, &c->flags)) + func(c, data); + } + + rcu_read_unlock(); +} + static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; @@ -1426,7 +1475,9 @@ int hci_le_create_cis_pending(struct hci_dev *hdev); int hci_conn_check_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, - u8 role); + u8 role, u16 handle); +struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, + bdaddr_t *dst, u8 role); void hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); void 
hci_conn_check_pending(struct hci_dev *hdev); diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h index 2d5fcda1bcd0..082f89531b88 100644 --- a/include/net/bluetooth/hci_mon.h +++ b/include/net/bluetooth/hci_mon.h @@ -56,7 +56,7 @@ struct hci_mon_new_index { __u8 type; __u8 bus; bdaddr_t bdaddr; - char name[8]; + char name[8] __nonstring; } __packed; #define HCI_MON_NEW_INDEX_SIZE 16 diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 57eeb07aeb25..6efbc2152146 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -80,6 +80,8 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, u8 *data, u32 flags, u16 min_interval, u16 max_interval, u16 sync_interval); +int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance); + int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, u8 instance, bool force); int hci_disable_advertising_sync(struct hci_dev *hdev); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 3a4b684f89bf..2b54fdd8ca15 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -52,7 +52,7 @@ * such wiphy can have zero, one, or many virtual interfaces associated with * it, which need to be identified as such by pointing the network interface's * @ieee80211_ptr pointer to a &struct wireless_dev which further describes - * the wireless part of the interface, normally this struct is embedded in the + * the wireless part of the interface. Normally this struct is embedded in the * network interface's private data area. Drivers can optionally allow creating * or destroying virtual interfaces on the fly, but without at least one or the * ability to create some the wireless device isn't useful. @@ -76,6 +76,8 @@ struct wiphy; * @IEEE80211_CHAN_DISABLED: This channel is disabled. * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes * sending probe requests or beaconing. + * @IEEE80211_CHAN_PSD: Power spectral density (in dBm) is set for this + * channel. * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel. * @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel * is not permitted. @@ -115,11 +117,16 @@ struct wiphy; * This may be due to the driver or due to regulatory bandwidth * restrictions. * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel. + * @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT + * @IEEE80211_CHAN_NO_UHB_VLP_CLIENT: Client connection with VLP AP + * not permitted using this channel + * @IEEE80211_CHAN_NO_UHB_AFC_CLIENT: Client connection with AFC AP + * not permitted using this channel */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, IEEE80211_CHAN_NO_IR = 1<<1, - /* hole at 1<<2 */ + IEEE80211_CHAN_PSD = 1<<2, IEEE80211_CHAN_RADAR = 1<<3, IEEE80211_CHAN_NO_HT40PLUS = 1<<4, IEEE80211_CHAN_NO_HT40MINUS = 1<<5, @@ -138,6 +145,9 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_16MHZ = 1<<18, IEEE80211_CHAN_NO_320MHZ = 1<<19, IEEE80211_CHAN_NO_EHT = 1<<20, + IEEE80211_CHAN_DFS_CONCURRENT = 1<<21, + IEEE80211_CHAN_NO_UHB_VLP_CLIENT= 1<<22, + IEEE80211_CHAN_NO_UHB_AFC_CLIENT= 1<<23, }; #define IEEE80211_CHAN_NO_HT40 \ @@ -171,6 +181,7 @@ enum ieee80211_channel_flags { * on this channel. * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered. * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels. 
+ * @psd: power spectral density (in dBm) */ struct ieee80211_channel { enum nl80211_band band; @@ -187,6 +198,7 @@ struct ieee80211_channel { enum nl80211_dfs_state dfs_state; unsigned long dfs_state_entered; unsigned int dfs_cac_ms; + s8 psd; }; /** @@ -410,6 +422,19 @@ struct ieee80211_sta_eht_cap { u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN]; }; +/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ +#ifdef __CHECKER__ +/* + * This is used to mark the sband->iftype_data pointer which is supposed + * to be an array with special access semantics (per iftype), but a lot + * of code got it wrong in the past, so with this marking sparse will be + * noisy when the pointer is used directly. + */ +# define __iftd __attribute__((noderef, address_space(__iftype_data))) +#else +# define __iftd +#endif /* __CHECKER__ */ + /** * struct ieee80211_sband_iftype_data - sband data per interface type * @@ -543,10 +568,48 @@ struct ieee80211_supported_band { struct ieee80211_sta_s1g_cap s1g_cap; struct ieee80211_edmg edmg_cap; u16 n_iftype_data; - const struct ieee80211_sband_iftype_data *iftype_data; + const struct ieee80211_sband_iftype_data __iftd *iftype_data; }; /** + * _ieee80211_set_sband_iftype_data - set sband iftype data array + * @sband: the sband to initialize + * @iftd: the iftype data array pointer + * @n_iftd: the length of the iftype data array + * + * Set the sband iftype data array; use this where the length cannot + * be derived from the ARRAY_SIZE() of the argument, but prefer + * ieee80211_set_sband_iftype_data() where it can be used. + */ +static inline void +_ieee80211_set_sband_iftype_data(struct ieee80211_supported_band *sband, + const struct ieee80211_sband_iftype_data *iftd, + u16 n_iftd) +{ + sband->iftype_data = (const void __iftd __force *)iftd; + sband->n_iftype_data = n_iftd; +} + +/** + * ieee80211_set_sband_iftype_data - set sband iftype data array + * @sband: the sband to initialize + * @iftd: the iftype data array + */ +#define ieee80211_set_sband_iftype_data(sband, iftd) \ + _ieee80211_set_sband_iftype_data(sband, iftd, ARRAY_SIZE(iftd)) + +/** + * for_each_sband_iftype_data - iterate sband iftype data entries + * @sband: the sband whose iftype_data array to iterate + * @i: iterator counter + * @iftd: iftype data pointer to set + */ +#define for_each_sband_iftype_data(sband, i, iftd) \ + for (i = 0, iftd = (const void __force *)&(sband)->iftype_data[i]; \ + i < (sband)->n_iftype_data; \ + i++, iftd = (const void __force *)&(sband)->iftype_data[i]) + +/** * ieee80211_get_sband_iftype_data - return sband data for a given iftype * @sband: the sband to search for the STA on * @iftype: enum nl80211_iftype @@ -557,6 +620,7 @@ static inline const struct ieee80211_sband_iftype_data * ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, u8 iftype) { + const struct ieee80211_sband_iftype_data *data; int i; if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) @@ -565,10 +629,7 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, if (iftype == NL80211_IFTYPE_AP_VLAN) iftype = NL80211_IFTYPE_AP; - for (i = 0; i < sband->n_iftype_data; i++) { - const struct ieee80211_sband_iftype_data *data = - &sband->iftype_data[i]; - + for_each_sband_iftype_data(sband, i, data) { if (data->types_mask & BIT(iftype)) return data; } @@ -924,6 +985,15 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1, const struct cfg80211_chan_def *chandef2); /** + * nl80211_chan_width_to_mhz - get the channel width in MHz + * 
@chan_width: the channel width from &enum nl80211_chan_width + * + * Return: channel width in MHz if the chan_width from &enum nl80211_chan_width + * is valid. -1 otherwise. + */ +int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width); + +/** * cfg80211_chandef_valid - check if a channel definition is valid * @chandef: the channel definition to check * Return: %true if the channel definition is valid. %false otherwise. @@ -954,6 +1024,30 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy, enum nl80211_iftype iftype); /** + * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable and we + * can/need start CAC on such channel + * @wiphy: the wiphy to validate against + * @chandef: the channel definition to check + * + * Return: true if all channels available and at least + * one channel requires CAC (NL80211_DFS_USABLE) + */ +bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy, + const struct cfg80211_chan_def *chandef); + +/** + * cfg80211_chandef_dfs_cac_time - get the DFS CAC time (in ms) for given + * channel definition + * @wiphy: the wiphy to validate against + * @chandef: the channel definition to check + * + * Returns: DFS CAC time (in ms) which applies for this channel definition + */ +unsigned int +cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, + const struct cfg80211_chan_def *chandef); + +/** * nl80211_send_chandef - sends the channel definition. * @msg: the msg to send channel definition * @chandef: the channel definition to check @@ -1289,6 +1383,7 @@ struct cfg80211_acl_data { * struct cfg80211_fils_discovery - FILS discovery parameters from * IEEE Std 802.11ai-2016, Annex C.3 MIB detail. * + * @update: Set to true if the feature configuration should be updated. * @min_interval: Minimum packet interval in TUs (0 - 10000) * @max_interval: Maximum packet interval in TUs (0 - 10000) * @tmpl_len: Template length @@ -1296,6 +1391,7 @@ struct cfg80211_acl_data { * frame headers. */ struct cfg80211_fils_discovery { + bool update; u32 min_interval; u32 max_interval; size_t tmpl_len; @@ -1306,6 +1402,7 @@ struct cfg80211_fils_discovery { * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe * response parameters in 6GHz. * + * @update: Set to true if the feature configuration should be updated. * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive * scanning @@ -1313,6 +1410,7 @@ struct cfg80211_fils_discovery { * @tmpl: Template data for probe response */ struct cfg80211_unsol_bcast_probe_resp { + bool update; u32 interval; size_t tmpl_len; const u8 *tmpl; @@ -1399,6 +1497,22 @@ struct cfg80211_ap_settings { u16 punct_bitmap; }; + +/** + * struct cfg80211_ap_update - AP configuration update + * + * Subset of &struct cfg80211_ap_settings, for updating a running AP. + * + * @beacon: beacon data + * @fils_discovery: FILS discovery transmission parameters + * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters + */ +struct cfg80211_ap_update { + struct cfg80211_beacon_data beacon; + struct cfg80211_fils_discovery fils_discovery; + struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; +}; + /** * struct cfg80211_csa_settings - channel switch settings * @@ -1568,6 +1682,21 @@ struct link_station_del_parameters { }; /** + * struct cfg80211_ttlm_params: TID to link mapping parameters + * + * Used for setting a TID to link mapping. 
+ * + * @dlink: Downlink TID to link mapping, as defined in section 9.4.2.314 + * (TID-To-Link Mapping element) in Draft P802.11be_D4.0. + * @ulink: Uplink TID to link mapping, as defined in section 9.4.2.314 + * (TID-To-Link Mapping element) in Draft P802.11be_D4.0. + */ +struct cfg80211_ttlm_params { + u16 dlink[8]; + u16 ulink[8]; +}; + +/** * struct station_parameters - station parameters * * Used to change and create a new station. @@ -2346,7 +2475,7 @@ struct mesh_config { * @user_mpm: userspace handles all MPM functions * @dtim_period: DTIM period to use * @beacon_interval: beacon interval to use - * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a] + * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a] * @basic_rates: basic rates to use when creating the mesh * @beacon_rate: bitrate to be used for beacons * @userspace_handles_dfs: whether user space controls DFS operation, i.e. @@ -2463,7 +2592,7 @@ struct cfg80211_scan_info { * @short_ssid: short ssid to scan for * @bssid: bssid to scan for * @channel_idx: idx of the channel in the channel array in the scan request - * which the above info relvant to + * which the above info is relevant to * @unsolicited_probe: the AP transmits unsolicited probe response every 20 TU * @short_ssid_valid: @short_ssid is valid and can be used * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait @@ -2487,7 +2616,6 @@ struct cfg80211_scan_6ghz_params { * @n_ssids: number of SSIDs * @channels: channels to scan on. * @n_channels: total number of channels to scan - * @scan_width: channel width for scanning * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets * @duration: how long to listen on each channel, in TUs. If @@ -2512,12 +2640,13 @@ struct cfg80211_scan_6ghz_params { * @n_6ghz_params: number of 6 GHz params * @scan_6ghz_params: 6 GHz params * @bssid: BSSID to scan for (most commonly, the wildcard BSSID) + * @tsf_report_link_id: for MLO, indicates the link ID of the BSS that should be + * used for TSF reporting. Can be set to -1 to indicate no preference. */ struct cfg80211_scan_request { struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; - enum nl80211_bss_scan_width scan_width; const u8 *ie; size_t ie_len; u16 duration; @@ -2541,6 +2670,7 @@ struct cfg80211_scan_request { bool scan_6ghz; u32 n_6ghz_params; struct cfg80211_scan_6ghz_params *scan_6ghz_params; + s8 tsf_report_link_id; /* keep last */ struct ieee80211_channel *channels[] __counted_by(n_channels); @@ -2566,7 +2696,7 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) * or no match (RSSI only) * @rssi_thold: don't report scan results below this threshold (in s32 dBm) * @per_band_rssi_thold: Minimum rssi threshold for each band to be applied - * for filtering out scan results received. Drivers advertize this support + * for filtering out scan results received. Drivers advertise this support * of band specific rssi based filtering through the feature capability * %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band * specific rssi thresholds take precedence over rssi_thold, if specified. 
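For driver authors, the new sband accessors above are easiest to see in context. A minimal sketch, assuming a hypothetical "foo" driver and a made-up single HE capability entry (neither is part of this commit), of attaching and walking the per-iftype data through the helpers rather than touching the sparse-marked pointer directly:

	static const struct ieee80211_sband_iftype_data foo_iftype_data[] = {
		{
			.types_mask = BIT(NL80211_IFTYPE_STATION),
			.he_cap.has_he = true,
		},
	};

	static void foo_init_band(struct ieee80211_supported_band *sband)
	{
		const struct ieee80211_sband_iftype_data *iftd;
		int i;

		/* the array length is taken via ARRAY_SIZE() inside the macro */
		ieee80211_set_sband_iftype_data(sband, foo_iftype_data);

		/* the iterator casts away the __iftd address space in one place */
		for_each_sband_iftype_data(sband, i, iftd)
			pr_debug("iftype mask 0x%x\n", iftd->types_mask);
	}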
@@ -2612,14 +2742,13 @@ struct cfg80211_bss_select_adjust { * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans) * @n_ssids: number of SSIDs * @n_channels: total number of channels to scan - * @scan_width: channel width for scanning * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets * @flags: control flags from &enum nl80211_scan_flags * @match_sets: sets of parameters to be matched for a scan result * entry to be considered valid and to be passed to the host * (others are filtered out). - * If ommited, all results are passed. + * If omitted, all results are passed. * @n_match_sets: number of match sets * @report_results: indicates that results were reported for this request * @wiphy: the wiphy this was for @@ -2653,14 +2782,13 @@ struct cfg80211_bss_select_adjust { * to the specified band while deciding whether a better BSS is reported * using @relative_rssi. If delta is a negative number, the BSSs that * belong to the specified band will be penalized by delta dB in relative - * comparisions. + * comparisons. */ struct cfg80211_sched_scan_request { u64 reqid; struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; - enum nl80211_bss_scan_width scan_width; const u8 *ie; size_t ie_len; u32 flags; @@ -2708,7 +2836,6 @@ enum cfg80211_signal_type { /** * struct cfg80211_inform_bss - BSS inform data * @chan: channel the frame was received on - * @scan_width: scan width that was used * @signal: signal strength value, according to the wiphy's * signal type * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was @@ -2724,11 +2851,17 @@ enum cfg80211_signal_type { * the BSS that requested the scan in which the beacon/probe was received. * @chains: bitmask for filled values in @chain_signal. * @chain_signal: per-chain signal strength of last received BSS in dBm. + * @restrict_use: restrict usage, if not set, assume @use_for is + * %NL80211_BSS_USE_FOR_NORMAL. + * @use_for: bitmap of possible usage for this BSS, see + * &enum nl80211_bss_use_for + * @cannot_use_reasons: the reasons (bitmap) for not being able to connect, + * if @restrict_use is set and @use_for is zero (empty); may be 0 for + * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons * @drv_data: Data to be passed through to @inform_bss */ struct cfg80211_inform_bss { struct ieee80211_channel *chan; - enum nl80211_bss_scan_width scan_width; s32 signal; u64 boottime_ns; u64 parent_tsf; @@ -2736,6 +2869,9 @@ struct cfg80211_inform_bss { u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; + u8 restrict_use:1, use_for:7; + u8 cannot_use_reasons; + void *drv_data; }; @@ -2762,7 +2898,6 @@ struct cfg80211_bss_ies { * for use in scan results and similar. * * @channel: channel this BSS is on - * @scan_width: width of the control channel * @bssid: BSSID of the BSS * @beacon_interval: the beacon interval as from the frame * @capability: the capability field in host byte order @@ -2775,6 +2910,8 @@ struct cfg80211_bss_ies { * own the beacon_ies, but they're just pointers to the ones from the * @hidden_beacon_bss struct) * @proberesp_ies: the information elements from the last Probe Response frame + * @proberesp_ecsa_stuck: ECSA element is stuck in the Probe Response frame, + * cannot rely on it having valid data * @hidden_beacon_bss: in case this BSS struct represents a probe response from * a BSS that hides the SSID in its beacon, this points to the BSS struct * that holds the beacon data. 
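With scan_width gone from struct cfg80211_inform_bss above, a driver's reporting path only has to fill the channel and signal; the new use_for/restrict_use bits are optional. A hedged sketch, where foo_report_beacon and the MLD-link-only restriction are purely illustrative:

	static void foo_report_beacon(struct wiphy *wiphy,
				      struct ieee80211_channel *chan,
				      struct ieee80211_mgmt *mgmt, size_t len,
				      s32 signal_dbm)
	{
		struct cfg80211_inform_bss data = {
			.chan = chan,
			.signal = signal_dbm,
			/* optionally limit how the entry may be used */
			.restrict_use = 1,
			.use_for = NL80211_BSS_USE_FOR_MLD_LINK,
		};
		struct cfg80211_bss *bss;

		bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
						     GFP_KERNEL);
		if (bss)
			cfg80211_put_bss(wiphy, bss);
	}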
@beacon_ies is still valid, of course, and @@ -2788,11 +2925,15 @@ struct cfg80211_bss_ies { * @chain_signal: per-chain signal strength of last received BSS in dBm. * @bssid_index: index in the multiple BSS set * @max_bssid_indicator: max number of members in the BSS set + * @use_for: bitmap of possible usage for this BSS, see + * &enum nl80211_bss_use_for + * @cannot_use_reasons: the reasons (bitmap) for not being able to connect, + * if @restrict_use is set and @use_for is zero (empty); may be 0 for + * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes */ struct cfg80211_bss { struct ieee80211_channel *channel; - enum nl80211_bss_scan_width scan_width; const struct cfg80211_bss_ies __rcu *ies; const struct cfg80211_bss_ies __rcu *beacon_ies; @@ -2811,9 +2952,14 @@ struct cfg80211_bss { u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; + u8 proberesp_ecsa_stuck:1; + u8 bssid_index; u8 max_bssid_indicator; + u8 use_for; + u8 cannot_use_reasons; + u8 priv[] __aligned(sizeof(void *)); }; @@ -2891,12 +3037,15 @@ struct cfg80211_auth_request { * @elems_len: length of the elements * @disabled: If set this link should be included during association etc. but it * should not be used until enabled by the AP MLD. + * @error: per-link error code, must be <= 0. If there is an error, then the + * operation as a whole must fail. */ struct cfg80211_assoc_link { struct cfg80211_bss *bss; const u8 *elems; size_t elems_len; bool disabled; + int error; }; /** @@ -3088,8 +3237,8 @@ struct cfg80211_ibss_params { * * @behaviour: requested BSS selection behaviour. * @param: parameters for requestion behaviour. - * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF. - * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST. + * @param.band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF. + * @param.adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST. */ struct cfg80211_bss_selection { enum nl80211_bss_select_attr behaviour; @@ -3495,7 +3644,7 @@ struct cfg80211_update_ft_ies_params { * This structure provides information needed to transmit a mgmt frame * * @chan: channel to use - * @offchan: indicates wether off channel operation is required + * @offchan: indicates whether off channel operation is required * @wait: duration for ROC * @buf: buffer to transmit * @len: buffer length @@ -3613,7 +3762,7 @@ struct cfg80211_nan_func_filter { * @publish_bcast: if true, the solicited publish should be broadcasted * @subscribe_active: if true, the subscribe is active * @followup_id: the instance ID for follow up - * @followup_reqid: the requestor instance ID for follow up + * @followup_reqid: the requester instance ID for follow up * @followup_dest: MAC address of the recipient of the follow up * @ttl: time to live counter in DW. * @serv_spec_info: Service Specific Info @@ -4401,6 +4550,7 @@ struct mgmt_frame_regs { * @del_link_station: Remove a link of a station. * * @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames. + * @set_ttlm: set the TID to link mapping. 
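To make the new cfg80211_ops entry concrete: a sketch of wiring up .set_ttlm, where the foo_* handler and its firmware call are hypothetical stand-ins rather than anything this commit adds:

	static int foo_set_ttlm(struct wiphy *wiphy, struct net_device *dev,
				struct cfg80211_ttlm_params *params)
	{
		/* e.g. push params->dlink[tid] / params->ulink[tid] to firmware */
		return 0;
	}

	static const struct cfg80211_ops foo_cfg80211_ops = {
		.set_ttlm = foo_set_ttlm,
		/* ...the remaining ops elided... */
	};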
*/ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -4450,7 +4600,7 @@ struct cfg80211_ops { int (*start_ap)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings); int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *info); + struct cfg80211_ap_update *info); int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id); @@ -4760,6 +4910,8 @@ struct cfg80211_ops { struct link_station_del_parameters *params); int (*set_hw_timestamp)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_set_hw_timestamp *hwts); + int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ttlm_params *params); }; /* @@ -4816,6 +4968,10 @@ struct cfg80211_ops { * @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys. * @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for * NL80211_REGDOM_SET_BY_DRIVER. + * @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if driver + * set this flag to update channels on beacon hints. + * @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link + * of an NSTR mobile AP MLD. */ enum wiphy_flags { WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0), @@ -4829,7 +4985,7 @@ enum wiphy_flags { WIPHY_FLAG_IBSS_RSN = BIT(8), WIPHY_FLAG_MESH_AUTH = BIT(10), WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11), - /* use hole at 12 */ + WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12), WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13), WIPHY_FLAG_AP_UAPSD = BIT(14), WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), @@ -4842,6 +4998,7 @@ enum wiphy_flags { WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23), WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = BIT(24), + WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON = BIT(25), }; /** @@ -5826,6 +5983,16 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work); */ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work); +/** + * wiphy_work_flush - flush previously queued work + * @wiphy: the wiphy, for debug purposes + * @work: the work to flush, this can be %NULL to flush all work + * + * Flush the work (i.e. run it if pending). This must be called + * under the wiphy mutex acquired by wiphy_lock(). + */ +void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work); + struct wiphy_delayed_work { struct wiphy_work work; struct wiphy *wiphy; @@ -5870,6 +6037,17 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy, struct wiphy_delayed_work *dwork); /** + * wiphy_delayed_work_flush - flush previously queued delayed work + * @wiphy: the wiphy, for debug purposes + * @dwork: the delayed work to flush + * + * Flush the work (i.e. run it if pending). This must be called + * under the wiphy mutex acquired by wiphy_lock(). 
+ */ +void wiphy_delayed_work_flush(struct wiphy *wiphy, + struct wiphy_delayed_work *dwork); + +/** * struct wireless_dev - wireless device state * * For netdevs, this structure must be allocated by the driver @@ -5897,7 +6075,6 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy, * wireless device if it has no netdev * @u: union containing data specific to @iftype * @connected: indicates if connected or not (STA mode) - * @bssid: (private) Used by the internal configuration code * @wext: (private) Used by the internal wireless extensions compat code * @wext.ibss: (private) IBSS data part of wext handling * @wext.connect: (private) connection handling data @@ -5917,10 +6094,6 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy, * @mgmt_registrations: list of registrations for management frames * @mgmt_registrations_need_update: mgmt registrations were updated, * need to propagate the update to the driver - * @mtx: mutex used to lock data in this struct, may be used by drivers - * and some API functions require it held - * @beacon_interval: beacon interval used on this device for transmitting - * beacons, 0 when not valid * @address: The address for this device, valid only if @netdev is %NULL * @is_running: true if this is a non-netdev device that has been started, e.g. * the P2P Device. @@ -5941,6 +6114,7 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy, * @event_lock: (private) lock for event list * @owner_nlportid: (private) owner socket port ID * @nl_owner_dead: (private) owner socket went away + * @cqm_rssi_work: (private) CQM RSSI reporting work * @cqm_config: (private) nl80211 RSSI monitor state * @pmsr_list: (private) peer measurement requests * @pmsr_lock: (private) peer measurements requests/results lock @@ -5964,8 +6138,6 @@ struct wireless_dev { struct list_head mgmt_registrations; u8 mgmt_registrations_need_update:1; - struct mutex mtx; - bool use_4addr, is_running, registered, registering; u8 address[ETH_ALEN] __aligned(sizeof(u16)); @@ -6013,7 +6185,8 @@ struct wireless_dev { } wext; #endif - struct cfg80211_cqm_config *cqm_config; + struct wiphy_work cqm_rssi_work; + struct cfg80211_cqm_config __rcu *cqm_config; struct list_head pmsr_list; spinlock_t pmsr_lock; @@ -6255,13 +6428,11 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, /** * ieee80211_mandatory_rates - get mandatory rates for a given band * @sband: the band to look for rates in - * @scan_width: width of the control channel * * This function returns a bitmap of the mandatory rates for the given * band, bits are set according to the rate position in the bitrates array. */ -u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, - enum nl80211_bss_scan_width scan_width); +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband); /* * Radiotap parsing functions -- for controlled injection support @@ -6602,7 +6773,7 @@ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) * @ies: data consisting of IEs * @len: length of data * - * Return: %NULL if the etended element could not be found or if + * Return: %NULL if the extended element could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match; otherwise return the * requested element struct. 
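The wiphy work helpers above compose as follows; a sketch assuming a hypothetical driver-owned statistics work item (wiphy_work_init() and wiphy_work_queue() are pre-existing API, wiphy_work_flush() is what this diff adds):

	static void foo_stats_work(struct wiphy *wiphy, struct wiphy_work *work)
	{
		/* runs with the wiphy mutex held */
	}

	static void foo_start(struct wiphy *wiphy, struct wiphy_work *stats_work)
	{
		wiphy_work_init(stats_work, foo_stats_work);
		wiphy_work_queue(wiphy, stats_work);
	}

	static void foo_stop(struct wiphy *wiphy, struct wiphy_work *stats_work)
	{
		lockdep_assert_held(&wiphy->mtx);
		/* run a still-pending item now instead of merely cancelling it */
		wiphy_work_flush(wiphy, stats_work);
	}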
@@ -6749,7 +6920,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2); /** * regulatory_set_wiphy_regd - set regdom info for self managed drivers * @wiphy: the wireless device we want to process the regulatory domain on - * @rd: the regulatory domain informatoin to use for this wiphy + * @rd: the regulatory domain information to use for this wiphy * * Set the regulatory domain information for self-managed wiphys, only they * may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more @@ -6840,7 +7011,7 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy); * Regulatory self-managed driver can use it to proactively * * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried. - * @freq: the freqency(in MHz) to be queried. + * @freq: the frequency (in MHz) to be queried. * @rule: pointer to store the wmm rule from the regulatory db. * * Self-managed wireless drivers can use this function to query @@ -6923,22 +7094,6 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, gfp_t gfp); static inline struct cfg80211_bss * __must_check -cfg80211_inform_bss_width_frame(struct wiphy *wiphy, - struct ieee80211_channel *rx_channel, - enum nl80211_bss_scan_width scan_width, - struct ieee80211_mgmt *mgmt, size_t len, - s32 signal, gfp_t gfp) -{ - struct cfg80211_inform_bss data = { - .chan = rx_channel, - .scan_width = scan_width, - .signal = signal, - }; - - return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); -} - -static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, struct ieee80211_mgmt *mgmt, size_t len, @@ -6946,7 +7101,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, { struct cfg80211_inform_bss data = { .chan = rx_channel, - .scan_width = NL80211_BSS_CHAN_WIDTH_20, .signal = signal, }; @@ -7021,6 +7175,23 @@ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, enum nl80211_band band); /** + * cfg80211_ssid_eq - compare two SSIDs + * @a: first SSID + * @b: second SSID + * + * Return: %true if SSIDs are equal, %false otherwise. + */ +static inline bool +cfg80211_ssid_eq(struct cfg80211_ssid *a, struct cfg80211_ssid *b) +{ + if (WARN_ON(!a || !b)) + return false; + if (a->ssid_len != b->ssid_len) + return false; + return memcmp(a->ssid, b->ssid, a->ssid_len) ? 
false : true; +} + +/** * cfg80211_inform_bss_data - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS @@ -7049,26 +7220,6 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, gfp_t gfp); static inline struct cfg80211_bss * __must_check -cfg80211_inform_bss_width(struct wiphy *wiphy, - struct ieee80211_channel *rx_channel, - enum nl80211_bss_scan_width scan_width, - enum cfg80211_bss_frame_type ftype, - const u8 *bssid, u64 tsf, u16 capability, - u16 beacon_interval, const u8 *ie, size_t ielen, - s32 signal, gfp_t gfp) -{ - struct cfg80211_inform_bss data = { - .chan = rx_channel, - .scan_width = scan_width, - .signal = signal, - }; - - return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, - capability, beacon_interval, ie, ielen, - gfp); -} - -static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum cfg80211_bss_frame_type ftype, @@ -7078,7 +7229,6 @@ cfg80211_inform_bss(struct wiphy *wiphy, { struct cfg80211_inform_bss data = { .chan = rx_channel, - .scan_width = NL80211_BSS_CHAN_WIDTH_20, .signal = signal, }; @@ -7088,6 +7238,25 @@ cfg80211_inform_bss(struct wiphy *wiphy, } /** + * __cfg80211_get_bss - get a BSS reference + * @wiphy: the wiphy this BSS struct belongs to + * @channel: the channel to search on (or %NULL) + * @bssid: the desired BSSID (or %NULL) + * @ssid: the desired SSID (or %NULL) + * @ssid_len: length of the SSID (or 0) + * @bss_type: type of BSS, see &enum ieee80211_bss_type + * @privacy: privacy filter, see &enum ieee80211_privacy + * @use_for: indicates which use is intended + */ +struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, + const u8 *ssid, size_t ssid_len, + enum ieee80211_bss_type bss_type, + enum ieee80211_privacy privacy, + u32 use_for); + +/** * cfg80211_get_bss - get a BSS reference * @wiphy: the wiphy this BSS struct belongs to * @channel: the channel to search on (or %NULL) @@ -7096,13 +7265,20 @@ cfg80211_inform_bss(struct wiphy *wiphy, * @ssid_len: length of the SSID (or 0) * @bss_type: type of BSS, see &enum ieee80211_bss_type * @privacy: privacy filter, see &enum ieee80211_privacy + * + * This version implies regular usage, %NL80211_BSS_USE_FOR_NORMAL. 
*/ -struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, - struct ieee80211_channel *channel, - const u8 *bssid, - const u8 *ssid, size_t ssid_len, - enum ieee80211_bss_type bss_type, - enum ieee80211_privacy privacy); +static inline struct cfg80211_bss * +cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, + const u8 *bssid, const u8 *ssid, size_t ssid_len, + enum ieee80211_bss_type bss_type, + enum ieee80211_privacy privacy) +{ + return __cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, + bss_type, privacy, + NL80211_BSS_USE_FOR_NORMAL); +} + static inline struct cfg80211_bss * cfg80211_get_ibss(struct wiphy *wiphy, struct ieee80211_channel *channel, @@ -7163,19 +7339,6 @@ void cfg80211_bss_iter(struct wiphy *wiphy, void *data), void *iter_data); -static inline enum nl80211_bss_scan_width -cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef) -{ - switch (chandef->width) { - case NL80211_CHAN_WIDTH_5: - return NL80211_BSS_CHAN_WIDTH_5; - case NL80211_CHAN_WIDTH_10: - return NL80211_BSS_CHAN_WIDTH_10; - default: - return NL80211_BSS_CHAN_WIDTH_20; - } -} - /** * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame * @dev: network device @@ -7208,9 +7371,7 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); /** - * struct cfg80211_rx_assoc_resp - association response data - * @bss: the BSS that association was requested with, ownership of the pointer - * moves to cfg80211 in the call to cfg80211_rx_assoc_resp() + * struct cfg80211_rx_assoc_resp_data - association response data * @buf: (Re)Association Response frame (header + body) * @len: length of the frame data * @uapsd_queues: bitmap of queues configured for uapsd. Same format @@ -7220,10 +7381,12 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); * @ap_mld_addr: AP MLD address (in case of MLO) * @links: per-link information indexed by link ID, use links[0] for * non-MLO connections + * @links.bss: the BSS that association was requested with, ownership of the + * pointer moves to cfg80211 in the call to cfg80211_rx_assoc_resp() * @links.status: Set this (along with a BSS pointer) for links that * were rejected by the AP. */ -struct cfg80211_rx_assoc_resp { +struct cfg80211_rx_assoc_resp_data { const u8 *buf; size_t len; const u8 *req_ies; @@ -7231,7 +7394,7 @@ struct cfg80211_rx_assoc_resp { int uapsd_queues; const u8 *ap_mld_addr; struct { - const u8 *addr; + u8 addr[ETH_ALEN] __aligned(2); struct cfg80211_bss *bss; u16 status; } links[IEEE80211_MLD_MAX_NUM_LINKS]; @@ -7240,7 +7403,7 @@ struct cfg80211_rx_assoc_resp { /** * cfg80211_rx_assoc_resp - notification of processed association response * @dev: network device - * @data: association response data, &struct cfg80211_rx_assoc_resp + * @data: association response data, &struct cfg80211_rx_assoc_resp_data * * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_auth_timeout(). @@ -7248,7 +7411,7 @@ struct cfg80211_rx_assoc_resp { * This function may sleep. The caller must hold the corresponding wdev's mutex. 
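Since @links.addr turns into embedded storage here, drivers now copy the link address into the struct instead of pointing at their own buffer. A sketch with hypothetical foo_* naming and a single (non-MLO) link:

	static void foo_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
				 const u8 *frame, size_t frame_len,
				 const u8 *link_addr)
	{
		struct cfg80211_rx_assoc_resp_data data = {
			.buf = frame,
			.len = frame_len,
			.uapsd_queues = -1,	/* no uAPSD */
		};

		data.links[0].bss = bss;
		memcpy(data.links[0].addr, link_addr, ETH_ALEN);

		cfg80211_rx_assoc_resp(dev, &data);
	}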
*/ void cfg80211_rx_assoc_resp(struct net_device *dev, - struct cfg80211_rx_assoc_resp *data); + const struct cfg80211_rx_assoc_resp_data *data); /** * struct cfg80211_assoc_failure - association failure data @@ -7367,7 +7530,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, * RFkill integration in cfg80211 is almost invisible to drivers, * as cfg80211 automatically registers an rfkill instance for each * wireless device it knows about. Soft kill is also translated - * into disconnecting and turning all interfaces off, drivers are + * into disconnecting and turning all interfaces off. Drivers are * expected to turn off the device when all interfaces are down. * * However, devices may have a hard RFkill line, in which case they @@ -7415,7 +7578,7 @@ static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy) * the configuration mechanism. * * A driver supporting vendor commands must register them as an array - * in struct wiphy, with handlers for each one, each command has an + * in struct wiphy, with handlers for each one. Each command has an * OUI and sub command ID to identify it. * * Note that this feature should not be (ab)used to implement protocol @@ -7579,7 +7742,7 @@ static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp) * interact with driver-specific tools to aid, for instance, * factory programming. * - * This chapter describes how drivers interact with it, for more + * This chapter describes how drivers interact with it. For more * information see the nl80211 book's chapter on it. */ @@ -7967,7 +8130,8 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, * cfg80211_port_authorized - notify cfg80211 of successful security association * * @dev: network device - * @bssid: the BSSID of the AP + * @peer_addr: BSSID of the AP/P2P GO in case of STA/GC or STA/GC MAC address + * in case of AP/P2P GO * @td_bitmap: transition disable policy * @td_bitmap_len: Length of transition disable policy * @gfp: allocation flags @@ -7978,8 +8142,11 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, * should be preceded with a call to cfg80211_connect_result(), * cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to * indicate the 802.11 association. + * This function can also be called by AP/P2P GO driver that supports + * authentication offload. In this case the peer_mac passed is that of + * associated STA/GC. */ -void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid, +void cfg80211_port_authorized(struct net_device *dev, const u8 *peer_addr, const u8* td_bitmap, u8 td_bitmap_len, gfp_t gfp); /** @@ -8568,7 +8735,7 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, * @link_id: the link ID for MLO, must be 0 for non-MLO * @punct_bitmap: the new puncturing bitmap * - * Caller must acquire wdev_lock, therefore must only be called from sleepable + * Caller must hold wiphy mutex, therefore must only be called from sleepable * driver context! */ void cfg80211_ch_switch_notify(struct net_device *dev, @@ -8808,6 +8975,18 @@ static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen, } /** + * ieee80211_fragment_element - fragment the last element in skb + * @skb: The skbuf that the element was added to + * @len_pos: Pointer to length of the element to fragment + * @frag_id: The element ID to use for fragments + * + * This function fragments all data after @len_pos, adding fragmentation + * elements with the given ID as appropriate. 
The SKB will grow in size + * accordingly. + */ +void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id); + +/** * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN * @wdev: the wireless device reporting the wakeup * @wakeup: the wakeup report @@ -9056,9 +9235,9 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, /** * cfg80211_assoc_comeback - notification of association that was - * temporarly rejected with a comeback + * temporarily rejected with a comeback * @netdev: network device - * @ap_addr: AP (MLD) address that rejected the assocation + * @ap_addr: AP (MLD) address that rejected the association * @timeout: timeout interval value TUs. * * this function may sleep. the caller must hold the corresponding wdev's mutex. @@ -9222,4 +9401,60 @@ bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap, */ void cfg80211_links_removed(struct net_device *dev, u16 link_mask); +/** + * cfg80211_schedule_channels_check - schedule regulatory check if needed + * @wdev: the wireless device to check + * + * In case the device supports NO_IR or DFS relaxations, schedule regulatory + * channels check, as previous concurrent operation conditions may not + * hold anymore. + */ +void cfg80211_schedule_channels_check(struct wireless_dev *wdev); + +#ifdef CONFIG_CFG80211_DEBUGFS +/** + * wiphy_locked_debugfs_read - do a locked read in debugfs + * @wiphy: the wiphy to use + * @file: the file being read + * @buf: the buffer to fill and then read from + * @bufsize: size of the buffer + * @userbuf: the user buffer to copy to + * @count: read count + * @ppos: read position + * @handler: the read handler to call (under wiphy lock) + * @data: additional data to pass to the read handler + */ +ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, + char *buf, size_t bufsize, + char __user *userbuf, size_t count, + loff_t *ppos, + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t bufsize, + void *data), + void *data); + +/** + * wiphy_locked_debugfs_write - do a locked write in debugfs + * @wiphy: the wiphy to use + * @file: the file being written to + * @buf: the buffer to copy the user data to + * @bufsize: size of the buffer + * @userbuf: the user buffer to copy from + * @count: read count + * @handler: the write handler to call (under wiphy lock) + * @data: additional data to pass to the write handler + */ +ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file, + char *buf, size_t bufsize, + const char __user *userbuf, size_t count, + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t count, + void *data), + void *data); +#endif + #endif /* __NET_CFG80211_H */ diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index f79ce133e51a..cd95711b12b8 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -20,6 +20,7 @@ struct wpan_phy; struct wpan_phy_cca; struct cfg802154_scan_request; struct cfg802154_beacon_request; +struct ieee802154_addr; #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL struct ieee802154_llsec_device_key; @@ -77,6 +78,12 @@ struct cfg802154_ops { struct cfg802154_beacon_request *request); int (*stop_beacons)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev); + int (*associate)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + struct ieee802154_addr *coord); + int (*disassociate)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + struct ieee802154_addr *target); #ifdef 
CONFIG_IEEE802154_NL802154_EXPERIMENTAL void (*get_llsec_table)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, @@ -304,6 +311,22 @@ struct ieee802154_coord_desc { }; /** + * struct ieee802154_pan_device - PAN device information + * @pan_id: the PAN ID of this device + * @mode: the preferred mode to reach the device + * @short_addr: the short address of this device + * @extended_addr: the extended address of this device + * @node: the list node + */ +struct ieee802154_pan_device { + __le16 pan_id; + u8 mode; + __le16 short_addr; + __le64 extended_addr; + struct list_head node; +}; + +/** * struct cfg802154_scan_request - Scan request * * @type: type of scan to be performed @@ -478,6 +501,13 @@ struct wpan_dev { /* fallback for acknowledgment bit setting */ bool ackreq; + + /* Associations */ + struct mutex association_lock; + struct ieee802154_pan_device *parent; + struct list_head children; + unsigned int max_associations; + unsigned int nchildren; }; #define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) @@ -529,4 +559,46 @@ static inline const char *wpan_phy_name(struct wpan_phy *phy) void ieee802154_configure_durations(struct wpan_phy *phy, unsigned int page, unsigned int channel); +/** + * cfg802154_device_is_associated - Checks whether we are associated to any device + * @wpan_dev: the wpan device + * @return: true if we are associated + */ +bool cfg802154_device_is_associated(struct wpan_dev *wpan_dev); + +/** + * cfg802154_device_is_parent - Checks if a device is our coordinator + * @wpan_dev: the wpan device + * @target: the expected parent + * @return: true if @target is our coordinator + */ +bool cfg802154_device_is_parent(struct wpan_dev *wpan_dev, + struct ieee802154_addr *target); + +/** + * cfg802154_device_is_child - Checks whether a device is associated to us + * @wpan_dev: the wpan device + * @target: the expected child + * @return: the PAN device + */ +struct ieee802154_pan_device * +cfg802154_device_is_child(struct wpan_dev *wpan_dev, + struct ieee802154_addr *target); + +/** + * cfg802154_set_max_associations - Limit the number of future associations + * @wpan_dev: the wpan device + * @max: the maximum number of devices we accept to associate + * @return: the old maximum value + */ +unsigned int cfg802154_set_max_associations(struct wpan_dev *wpan_dev, + unsigned int max); + +/** + * cfg802154_get_free_short_addr - Get a free address among the known devices + * @wpan_dev: the wpan device + * @return: a random short address expectedly unused on our PAN + */ +__le16 cfg802154_get_free_short_addr(struct wpan_dev *wpan_dev); + #endif /* __NET_CFG802154_H */ diff --git a/include/net/devlink.h b/include/net/devlink.h index 29fd1b4ee654..9ac394bdfbe4 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -150,6 +150,7 @@ struct devlink_port { struct devlink_rate *devlink_rate; struct devlink_linecard *linecard; + u32 rel_index; }; struct devlink_port_new_attrs { @@ -1697,6 +1698,8 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller, u16 pf, u32 sf, bool external); +int devl_port_fn_devlink_set(struct devlink_port *devlink_port, + struct devlink *fn_devlink); struct devlink_rate * devl_rate_node_create(struct devlink *devlink, void *priv, char *node_name, struct devlink_rate *parent); @@ -1717,8 +1720,8 @@ void devlink_linecard_provision_clear(struct devlink_linecard *linecard); void devlink_linecard_provision_fail(struct devlink_linecard 
*linecard); void devlink_linecard_activate(struct devlink_linecard *linecard); void devlink_linecard_deactivate(struct devlink_linecard *linecard); -void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard, - struct devlink *nested_devlink); +int devlink_linecard_nested_dl_set(struct devlink_linecard *linecard, + struct devlink *nested_devlink); int devl_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, @@ -1851,36 +1854,36 @@ int devlink_info_version_running_put_ext(struct devlink_info_req *req, const char *version_value, enum devlink_info_version_type version_type); -int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg); -int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg); - -int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name); -int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg); - -int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg, - const char *name); -int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg); -int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, - const char *name); -int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg); - -int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value); -int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value); -int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, - u16 value_len); - -int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name, - bool value); -int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name, - u8 value); -int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name, - u32 value); -int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name, - u64 value); -int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name, - const char *value); -int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name, - const void *value, u32 value_len); +void devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg); +void devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg); + +void devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name); +void devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg); + +void devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg, + const char *name); +void devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg); +void devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, + const char *name); +void devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg); + +void devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value); +void devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value); +void devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, + u16 value_len); + +void devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name, + bool value); +void devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name, + u8 value); +void devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name, + u32 value); +void devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name, + u64 value); +void devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name, + const char *value); +void devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name, + const void *value, u32 value_len); struct devlink_health_reporter * devl_port_health_reporter_create(struct devlink_port 
*port, @@ -1918,6 +1921,8 @@ devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, void devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter); +int devl_nested_devlink_set(struct devlink *devlink, + struct devlink *nested_devlink); bool devlink_is_reload_failed(const struct devlink *devlink); void devlink_remote_reload_actions_performed(struct devlink *devlink, enum devlink_reload_limit limit, diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index a587e83fc169..6d3a20163260 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -20,9 +20,14 @@ FN(IP_NOPROTO) \ FN(SOCKET_RCVBUFF) \ FN(PROTO_MEM) \ + FN(TCP_AUTH_HDR) \ FN(TCP_MD5NOTFOUND) \ FN(TCP_MD5UNEXPECTED) \ FN(TCP_MD5FAILURE) \ + FN(TCP_AONOTFOUND) \ + FN(TCP_AOUNEXPECTED) \ + FN(TCP_AOKEYNOTFOUND) \ + FN(TCP_AOFAILURE) \ FN(SOCKET_BACKLOG) \ FN(TCP_FLAGS) \ FN(TCP_ZEROWINDOW) \ @@ -80,6 +85,10 @@ FN(IPV6_NDISC_BAD_OPTIONS) \ FN(IPV6_NDISC_NS_OTHERHOST) \ FN(QUEUE_PURGE) \ + FN(TC_COOKIE_ERROR) \ + FN(PACKET_SOCK_ERROR) \ + FN(TC_CHAIN_NOTFOUND) \ + FN(TC_RECLASSIFY_LOOP) \ FNe(MAX) /** @@ -142,6 +151,11 @@ enum skb_drop_reason { */ SKB_DROP_REASON_PROTO_MEM, /** + * @SKB_DROP_REASON_TCP_AUTH_HDR: TCP-MD5 or TCP-AO hashes are met + * twice or set incorrectly. + */ + SKB_DROP_REASON_TCP_AUTH_HDR, + /** * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash and one expected, * corresponding to LINUX_MIB_TCPMD5NOTFOUND */ @@ -157,6 +171,26 @@ enum skb_drop_reason { */ SKB_DROP_REASON_TCP_MD5FAILURE, /** + * @SKB_DROP_REASON_TCP_AONOTFOUND: no TCP-AO hash and one was expected, + * corresponding to LINUX_MIB_TCPAOREQUIRED + */ + SKB_DROP_REASON_TCP_AONOTFOUND, + /** + * @SKB_DROP_REASON_TCP_AOUNEXPECTED: TCP-AO hash is present and it + * was not expected, corresponding to LINUX_MIB_TCPAOKEYNOTFOUND + */ + SKB_DROP_REASON_TCP_AOUNEXPECTED, + /** + * @SKB_DROP_REASON_TCP_AOKEYNOTFOUND: TCP-AO key is unknown, + * corresponding to LINUX_MIB_TCPAOKEYNOTFOUND + */ + SKB_DROP_REASON_TCP_AOKEYNOTFOUND, + /** + * @SKB_DROP_REASON_TCP_AOFAILURE: TCP-AO hash is wrong, + * corresponding to LINUX_MIB_TCPAOBAD + */ + SKB_DROP_REASON_TCP_AOFAILURE, + /** * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog ( * see LINUX_MIB_TCPBACKLOGDROP) */ @@ -346,6 +380,23 @@ enum skb_drop_reason { /** @SKB_DROP_REASON_QUEUE_PURGE: bulk free. */ SKB_DROP_REASON_QUEUE_PURGE, /** + * @SKB_DROP_REASON_TC_COOKIE_ERROR: An error occurred whilst + * processing a tc ext cookie. + */ + SKB_DROP_REASON_TC_COOKIE_ERROR, + /** + * @SKB_DROP_REASON_PACKET_SOCK_ERROR: generic packet socket errors + * after its filter matches an incoming packet. + */ + SKB_DROP_REASON_PACKET_SOCK_ERROR, + /** @SKB_DROP_REASON_TC_CHAIN_NOTFOUND: tc chain lookup failed. */ + SKB_DROP_REASON_TC_CHAIN_NOTFOUND, + /** + * @SKB_DROP_REASON_TC_RECLASSIFY_LOOP: tc exceeded max reclassify loop + * iterations. 
+ */ + SKB_DROP_REASON_TC_RECLASSIFY_LOOP, /** * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which * shouldn't be used as a real 'reason' - only for tracing code gen */ diff --git a/include/net/dsa.h b/include/net/dsa.h index 0b9c6aa27047..82135fbdb1e6 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -102,11 +102,11 @@ struct dsa_device_ops { const char *name; enum dsa_tag_protocol proto; /* Some tagging protocols either mangle or shift the destination MAC - * address, in which case the DSA master would drop packets on ingress + * address, in which case the DSA conduit would drop packets on ingress * if what it understands out of the destination MAC address is not in * its RX filter. */ - bool promisc_on_master; + bool promisc_on_conduit; }; struct dsa_lag { @@ -236,12 +236,12 @@ struct dsa_bridge { }; struct dsa_port { - /* A CPU port is physically connected to a master device. - * A user port exposed to userspace has a slave device. + /* A CPU port is physically connected to a conduit device. A user port + * exposes a network device to user-space, called 'user' here. */ union { - struct net_device *master; - struct net_device *slave; + struct net_device *conduit; + struct net_device *user; }; /* Copy of the tagging protocol operations, for quicker access @@ -249,7 +249,7 @@ struct dsa_port { */ const struct dsa_device_ops *tag_ops; - /* Copies for faster access in master receive hot path */ + /* Copies for faster access in conduit receive hot path */ struct dsa_switch_tree *dst; struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); @@ -281,9 +281,9 @@ struct dsa_port { u8 lag_tx_enabled:1; - /* Master state bits, valid only on CPU ports */ - u8 master_admin_up:1; - u8 master_oper_up:1; + /* conduit state bits, valid only on CPU ports */ + u8 conduit_admin_up:1; + u8 conduit_oper_up:1; /* Valid only on user ports */ u8 cpu_port_in_lag:1; @@ -303,7 +303,7 @@ struct dsa_port { struct list_head list; /* - * Original copy of the master netdev ethtool_ops + * Original copy of the conduit netdev ethtool_ops */ const struct ethtool_ops *orig_ethtool_ops; @@ -452,10 +452,10 @@ struct dsa_switch { const struct dsa_switch_ops *ops; /* - * Slave mii_bus and devices for the individual ports. + * User mii_bus and devices for the individual ports. 
*/ u32 phys_mii_mask; - struct mii_bus *slave_mii_bus; + struct mii_bus *user_mii_bus; /* Ageing Time limits in msecs */ unsigned int ageing_time_min; @@ -520,10 +520,10 @@ static inline bool dsa_port_is_unused(struct dsa_port *dp) return dp->type == DSA_PORT_TYPE_UNUSED; } -static inline bool dsa_port_master_is_operational(struct dsa_port *dp) +static inline bool dsa_port_conduit_is_operational(struct dsa_port *dp) { - return dsa_port_is_cpu(dp) && dp->master_admin_up && - dp->master_oper_up; + return dsa_port_is_cpu(dp) && dp->conduit_admin_up && + dp->conduit_oper_up; } static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) @@ -713,12 +713,12 @@ static inline bool dsa_port_offloads_lag(struct dsa_port *dp, return dsa_port_lag_dev_get(dp) == lag->dev; } -static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp) +static inline struct net_device *dsa_port_to_conduit(const struct dsa_port *dp) { if (dp->cpu_port_in_lag) return dsa_port_lag_dev_get(dp->cpu_dp); - return dp->cpu_dp->master; + return dp->cpu_dp->conduit; } static inline @@ -732,7 +732,7 @@ struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp) else if (dp->hsr_dev) return dp->hsr_dev; - return dp->slave; + return dp->user; } static inline struct net_device * @@ -834,9 +834,9 @@ struct dsa_switch_ops { int (*connect_tag_protocol)(struct dsa_switch *ds, enum dsa_tag_protocol proto); - int (*port_change_master)(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack); + int (*port_change_conduit)(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack); /* Optional switch-wide initialization and destruction methods */ int (*setup)(struct dsa_switch *ds); @@ -969,6 +969,16 @@ struct dsa_switch_ops { struct phy_device *phy); void (*port_disable)(struct dsa_switch *ds, int port); + + /* + * Notification for MAC address changes on user ports. Drivers can + * currently only veto operations. They should not use the method to + * program the hardware, since the operation is not rolled back in case + * of other errors. 
+ */ + int (*port_set_mac_address)(struct dsa_switch *ds, int port, + const unsigned char *addr); + /* * Compatibility between device trees defining multiple CPU ports and * drivers which are not OK to use by default the numerically smallest @@ -1198,7 +1208,8 @@ struct dsa_switch_ops { * HSR integration */ int (*port_hsr_join)(struct dsa_switch *ds, int port, - struct net_device *hsr); + struct net_device *hsr, + struct netlink_ext_ack *extack); int (*port_hsr_leave)(struct dsa_switch *ds, int port, struct net_device *hsr); @@ -1222,11 +1233,11 @@ struct dsa_switch_ops { int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid); /* - * DSA master tracking operations + * DSA conduit tracking operations */ - void (*master_state_change)(struct dsa_switch *ds, - const struct net_device *master, - bool operational); + void (*conduit_state_change)(struct dsa_switch *ds, + const struct net_device *conduit, + bool operational); }; #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ @@ -1363,9 +1374,9 @@ static inline int dsa_switch_resume(struct dsa_switch *ds) #endif /* CONFIG_PM_SLEEP */ #if IS_ENABLED(CONFIG_NET_DSA) -bool dsa_slave_dev_check(const struct net_device *dev); +bool dsa_user_dev_check(const struct net_device *dev); #else -static inline bool dsa_slave_dev_check(const struct net_device *dev) +static inline bool dsa_user_dev_check(const struct net_device *dev) { return false; } diff --git a/include/net/dsa_stubs.h b/include/net/dsa_stubs.h index 361811750a54..6f384897f287 100644 --- a/include/net/dsa_stubs.h +++ b/include/net/dsa_stubs.h @@ -13,14 +13,14 @@ extern const struct dsa_stubs *dsa_stubs; struct dsa_stubs { - int (*master_hwtstamp_validate)(struct net_device *dev, - const struct kernel_hwtstamp_config *config, - struct netlink_ext_ack *extack); + int (*conduit_hwtstamp_validate)(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); }; -static inline int dsa_master_hwtstamp_validate(struct net_device *dev, - const struct kernel_hwtstamp_config *config, - struct netlink_ext_ack *extack) +static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { if (!netdev_uses_dsa(dev)) return 0; @@ -29,18 +29,18 @@ static inline int dsa_master_hwtstamp_validate(struct net_device *dev, * netdev_uses_dsa() returns true, the dsa_core module is still * registered, and so, dsa_unregister_stubs() couldn't have run. * For netdev_uses_dsa() to start returning false, it would imply that - * dsa_master_teardown() has executed, which requires rtnl_lock(). + * dsa_conduit_teardown() has executed, which requires rtnl_lock(). 
*/ ASSERT_RTNL(); - return dsa_stubs->master_hwtstamp_validate(dev, config, extack); + return dsa_stubs->conduit_hwtstamp_validate(dev, config, extack); } #else -static inline int dsa_master_hwtstamp_validate(struct net_device *dev, - const struct kernel_hwtstamp_config *config, - struct netlink_ext_ack *extack) +static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { return 0; } diff --git a/include/net/dst.h b/include/net/dst.h index 78884429deed..f5dfc8fb7b37 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -222,13 +222,6 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr return msecs_to_jiffies(dst_metric(dst, metric)); } -static inline u32 -dst_allfrag(const struct dst_entry *dst) -{ - int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG); - return ret; -} - static inline int dst_metric_locked(const struct dst_entry *dst, int metric) { @@ -392,10 +385,10 @@ static inline int dst_discard(struct sk_buff *skb) { return dst_discard_out(&init_net, skb->sk, skb); } -void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, +void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_obsolete, unsigned short flags); void dst_init(struct dst_entry *dst, struct dst_ops *ops, - struct net_device *dev, int initial_ref, int initial_obsolete, + struct net_device *dev, int initial_obsolete, unsigned short flags); struct dst_entry *dst_destroy(struct dst_entry *dst); void dst_dev_put(struct dst_entry *dst); diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 82da359bca03..d17855c52ef9 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -172,8 +172,7 @@ void fib_rules_unregister(struct fib_rules_ops *); int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags, struct fib_lookup_arg *); -int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, - u32 flags); +int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table); bool fib_rule_matchall(const struct fib_rule *rule); int fib_rules_dump(struct net *net, struct notifier_block *nb, int family, struct netlink_ext_ack *extack); diff --git a/include/net/flow.h b/include/net/flow.h index 7f0adda3bf2f..335bbc52171c 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -40,8 +40,8 @@ struct flowi_common { #define FLOWI_FLAG_KNOWN_NH 0x02 __u32 flowic_secid; kuid_t flowic_uid; - struct flowi_tunnel flowic_tun_key; __u32 flowic_multipath_hash; + struct flowi_tunnel flowic_tun_key; }; union flowi_uli { diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 9efa9a59e81f..314087a5e181 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -333,7 +333,7 @@ struct flow_action_entry { struct flow_action { unsigned int num_entries; - struct flow_action_entry entries[]; + struct flow_action_entry entries[] __counted_by(num_entries); }; static inline bool flow_action_has_entries(const struct flow_action *action) diff --git a/include/net/genetlink.h b/include/net/genetlink.h index e18a4c0d69ee..e61469129402 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -8,10 +8,15 @@ #define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN) +/* Binding to multicast group requires %CAP_NET_ADMIN */ +#define GENL_MCAST_CAP_NET_ADMIN BIT(0) +/* Binding to multicast group requires %CAP_SYS_ADMIN */ +#define GENL_MCAST_CAP_SYS_ADMIN BIT(1) + /** * struct genl_multicast_group - 
generic netlink multicast group * @name: name of the multicast group, names are per-family - * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM) + * @flags: GENL_MCAST_* flags */ struct genl_multicast_group { char name[GENL_NAMSIZ]; @@ -49,6 +54,9 @@ struct genl_info; * @split_ops: the split do/dump form of operation definition * @n_split_ops: number of entries in @split_ops, not that with split do/dump * ops the number of entries is not the same as number of commands + * @sock_priv_size: the size of per-socket private memory + * @sock_priv_init: the per-socket private memory initializer + * @sock_priv_destroy: the per-socket private memory destructor * * Attribute policies (the combination of @policy and @maxattr fields) * can be attached at the family level or at the operation level. @@ -82,11 +90,17 @@ struct genl_family { const struct genl_multicast_group *mcgrps; struct module *module; + size_t sock_priv_size; + void (*sock_priv_init)(void *priv); + void (*sock_priv_destroy)(void *priv); + /* private: internal use only */ /* protocol family identifier */ int id; /* starting number of multicast group IDs in this family */ unsigned int mcgrp_offset; + /* list of per-socket privs */ + struct xarray *sock_privs; }; /** @@ -296,6 +310,8 @@ static inline bool genl_info_is_ntf(const struct genl_info *info) return !info->nlhdr; } +void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk); +void *genl_sk_priv_get(struct genl_family *family, struct sock *sk); int genl_register_family(struct genl_family *family); int genl_unregister_family(const struct genl_family *family); void genl_notify(const struct genl_family *family, struct sk_buff *skb, @@ -436,6 +452,35 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr) } /** + * genlmsg_multicast_netns_filtered - multicast a netlink message + * to a specific netns with filter + * function + * @family: the generic netlink family + * @net: the net namespace + * @skb: netlink message as socket buffer + * @portid: own netlink portid to avoid sending to yourself + * @group: offset of multicast group in groups array + * @flags: allocation flags + * @filter: filter function + * @filter_data: filter function private data + * + * Return: 0 on success, negative error code for failure. + */ +static inline int +genlmsg_multicast_netns_filtered(const struct genl_family *family, + struct net *net, struct sk_buff *skb, + u32 portid, unsigned int group, gfp_t flags, + netlink_filter_fn filter, + void *filter_data) +{ + if (WARN_ON_ONCE(group >= family->n_mcgrps)) + return -EINVAL; + group = family->mcgrp_offset + group; + return nlmsg_multicast_filtered(net->genl_sock, skb, portid, group, + flags, filter, filter_data); +} + +/** * genlmsg_multicast_netns - multicast a netlink message to a specific netns * @family: the generic netlink family * @net: the net namespace @@ -448,10 +493,8 @@ static inline int genlmsg_multicast_netns(const struct genl_family *family, struct net *net, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { - if (WARN_ON_ONCE(group >= family->n_mcgrps)) - return -EINVAL; - group = family->mcgrp_offset + group; - return nlmsg_multicast(net->genl_sock, skb, portid, group, flags); + return genlmsg_multicast_netns_filtered(family, net, skb, portid, + group, flags, NULL, NULL); } /** diff --git a/include/net/gro.h b/include/net/gro.h index 88644b3ca660..b435f0ddbf64 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -41,7 +41,7 @@ struct napi_gro_cb { /* Number of segments aggregated. 
*/ u16 count; - /* Used in ipv6_gro_receive() and foo-over-udp */ + /* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */ u16 proto; /* Used in napi_gro_cb::free */ diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 2338f8d2a8b3..925bac726a92 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -539,6 +539,12 @@ enum ieee80211_radiotap_eht_usig_common { IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK = 0x00000080, IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER = 0x00007000, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW = 0x00038000, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ = 0, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ = 1, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ = 2, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ = 3, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1 = 4, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_2 = 5, IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL = 0x00040000, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR = 0x01f80000, IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP = 0xfe000000, diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 063313df447d..4de858f9929e 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -125,6 +125,35 @@ struct ieee802154_hdr_fc { #endif }; +struct ieee802154_assoc_req_pl { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved1:1, + device_type:1, + power_source:1, + rx_on_when_idle:1, + assoc_type:1, + reserved2:1, + security_cap:1, + alloc_addr:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 alloc_addr:1, + security_cap:1, + reserved2:1, + assoc_type:1, + rx_on_when_idle:1, + power_source:1, + device_type:1, + reserved1:1; +#else +#error "Please fix <asm/byteorder.h>" +#endif +} __packed; + +struct ieee802154_assoc_resp_pl { + __le16 short_addr; + u8 status; +} __packed; + enum ieee802154_frame_version { IEEE802154_2003_STD, IEEE802154_2006_STD, @@ -140,6 +169,19 @@ enum ieee802154_addressing_mode { IEEE802154_EXTENDED_ADDRESSING, }; +enum ieee802154_association_status { + IEEE802154_ASSOCIATION_SUCCESSFUL = 0x00, + IEEE802154_PAN_AT_CAPACITY = 0x01, + IEEE802154_PAN_ACCESS_DENIED = 0x02, + IEEE802154_HOPPING_SEQUENCE_OFFSET_DUP = 0x03, + IEEE802154_FAST_ASSOCIATION_SUCCESSFUL = 0x80, +}; + +enum ieee802154_disassociation_reason { + IEEE802154_COORD_WISHES_DEVICE_TO_LEAVE = 0x1, + IEEE802154_DEVICE_WISHES_TO_LEAVE = 0x2, +}; + struct ieee802154_hdr { struct ieee802154_hdr_fc fc; u8 seq; @@ -163,6 +205,24 @@ struct ieee802154_beacon_req_frame { struct ieee802154_mac_cmd_pl mac_pl; }; +struct ieee802154_association_req_frame { + struct ieee802154_hdr mhr; + struct ieee802154_mac_cmd_pl mac_pl; + struct ieee802154_assoc_req_pl assoc_req_pl; +}; + +struct ieee802154_association_resp_frame { + struct ieee802154_hdr mhr; + struct ieee802154_mac_cmd_pl mac_pl; + struct ieee802154_assoc_resp_pl assoc_resp_pl; +}; + +struct ieee802154_disassociation_notif_frame { + struct ieee802154_hdr mhr; + struct ieee802154_mac_cmd_pl mac_pl; + u8 disassoc_pl; +}; + /* pushes hdr onto the skb. fields of hdr->fc that can be calculated from * the contents of hdr will be, and the actual value of those bits in * hdr->fc will be ignored. 
this includes the INTRA_PAN bit and the frame diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index c8490729b4ae..f07642264c1e 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -22,10 +22,6 @@ #define IF_RS_SENT 0x10 #define IF_READY 0x80000000 -/* prefix flags */ -#define IF_PREFIX_ONLINK 0x01 -#define IF_PREFIX_AUTOCONF 0x02 - enum { INET6_IFADDR_STATE_PREDAD, INET6_IFADDR_STATE_DAD, @@ -89,7 +85,7 @@ struct ip6_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; - struct in6_addr sl_addr[]; + struct in6_addr sl_addr[] __counted_by(sl_max); }; #define IP6_SFBLOCK 10 /* allocate this many at once */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 5d2fcc137b88..9ab4bf704e86 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -44,7 +44,6 @@ struct inet_connection_sock_af_ops { struct request_sock *req_unhash, bool *own_req); u16 net_header_len; - u16 net_frag_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); @@ -114,7 +113,10 @@ struct inet_connection_sock { __u8 quick; /* Scheduled number of quick acks */ __u8 pingpong; /* The session is interactive */ __u8 retry; /* Number of attempts */ - __u32 ato; /* Predicted tick of soft clock */ + #define ATO_BITS 8 + __u32 ato:ATO_BITS, /* Predicted tick of soft clock */ + lrcv_flowlabel:20, /* last received ipv6 flowlabel */ + unused:4; unsigned long timeout; /* Currently scheduled timeout */ __u32 lrcvtime; /* timestamp of last received data packet */ __u16 last_seg_size; /* Size of last incoming segment */ @@ -325,11 +327,10 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); -#define TCP_PINGPONG_THRESH 1 - static inline void inet_csk_enter_pingpong_mode(struct sock *sk) { - inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH; + inet_csk(sk)->icsk_ack.pingpong = + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh); } static inline void inet_csk_exit_pingpong_mode(struct sock *sk) @@ -339,7 +340,16 @@ static inline void inet_csk_exit_pingpong_mode(struct sock *sk) static inline bool inet_csk_in_pingpong_mode(struct sock *sk) { - return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; + return inet_csk(sk)->icsk_ack.pingpong >= + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh); +} + +static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + + if (icsk->icsk_ack.pingpong < U8_MAX) + icsk->icsk_ack.pingpong++; } static inline bool inet_csk_has_ulp(const struct sock *sk) @@ -347,4 +357,12 @@ static inline bool inet_csk_has_ulp(const struct sock *sk) return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops; } +static inline void inet_init_csk_locks(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + + spin_lock_init(&icsk->icsk_accept_queue.rskq_lock); + spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock); +} + #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 3ecfeadbfa06..7f1b38458743 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -88,7 +88,7 @@ struct inet_bind_bucket { unsigned short fast_sk_family; bool fast_ipv6_only; struct hlist_node node; - struct hlist_head owners; + struct hlist_head bhash2; }; struct inet_bind2_bucket { @@ 
-96,22 +96,17 @@ struct inet_bind2_bucket { int l3mdev; unsigned short port; #if IS_ENABLED(CONFIG_IPV6) - unsigned short family; -#endif - union { -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr v6_rcv_saddr; + unsigned short addr_type; + struct in6_addr v6_rcv_saddr; +#define rcv_saddr v6_rcv_saddr.s6_addr32[3] +#else + __be32 rcv_saddr; #endif - __be32 rcv_saddr; - }; /* Node in the bhash2 inet_bind_hashbucket chain */ struct hlist_node node; + struct hlist_node bhash_node; /* List of sockets hashed to this bucket */ struct hlist_head owners; - /* bhash has twsk in owners, but bhash2 has twsk in - * deathrow not to add a member in struct sock_common. - */ - struct hlist_head deathrow; }; static inline struct net *ib_net(const struct inet_bind_bucket *ib) @@ -241,7 +236,7 @@ bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, struct inet_bind2_bucket * inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, - unsigned short port, int l3mdev, + struct inet_bind_bucket *tb, const struct sock *sk); void inet_bind2_bucket_destroy(struct kmem_cache *cachep, diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 2de0e4d4a027..d94c242eb3ed 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -234,17 +234,13 @@ struct inet_sock { int uc_index; int mc_index; __be32 mc_addr; - struct { - __u16 lo; - __u16 hi; - } local_port_range; + u32 local_port_range; /* high << 16 | low */ struct ip_mc_socklist __rcu *mc_list; struct inet_cork_full cork; }; #define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ -#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */ enum { INET_FLAGS_PKTINFO = 0, @@ -268,6 +264,16 @@ enum { INET_FLAGS_NODEFRAG = 17, INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, INET_FLAGS_DEFER_CONNECT = 19, + INET_FLAGS_MC6_LOOP = 20, + INET_FLAGS_RECVERR6_RFC4884 = 21, + INET_FLAGS_MC6_ALL = 22, + INET_FLAGS_AUTOFLOWLABEL_SET = 23, + INET_FLAGS_AUTOFLOWLABEL = 24, + INET_FLAGS_DONTFRAG = 25, + INET_FLAGS_RECVERR6 = 26, + INET_FLAGS_REPFLOW = 27, + INET_FLAGS_RTALERT_ISOLATE = 28, + INET_FLAGS_SNDFLOW = 29, }; /* cmsg flags for inet */ @@ -301,11 +307,6 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet) #define inet_assign_bit(nr, sk, val) \ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val) -static inline bool sk_is_inet(struct sock *sk) -{ - return sk->sk_family == AF_INET || sk->sk_family == AF_INET6; -} - /** * sk_to_full_sk - Access to a full socket * @sk: pointer to a socket diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 4a8e578405cb..f28da08a37b4 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -67,20 +67,17 @@ struct inet_timewait_sock { /* And these are ours. 
*/ unsigned int tw_transparent : 1, tw_flowlabel : 20, - tw_pad : 3, /* 3 bits hole */ + tw_usec_ts : 1, + tw_pad : 2, /* 2 bits hole */ tw_tos : 8; u32 tw_txhash; u32 tw_priority; struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; struct inet_bind2_bucket *tw_tb2; - struct hlist_node tw_bind2_node; }; #define tw_tclass tw_tos -#define twsk_for_each_bound_bhash2(__tw, list) \ - hlist_for_each_entry(__tw, list, tw_bind2_node) - static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk) { return (struct inet_timewait_sock *)sk; diff --git a/include/net/ip.h b/include/net/ip.h index 19adacd5ece0..25cb688bdc62 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -57,6 +57,7 @@ struct inet_skb_parm { #define IPSKB_FRAG_PMTU BIT(6) #define IPSKB_L3SLAVE BIT(7) #define IPSKB_NOPOLICY BIT(8) +#define IPSKB_MULTIPATH BIT(9) u16 frag_max_size; }; @@ -94,7 +95,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, ipcm_init(ipcm); ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark); - ipcm->sockc.tsflags = inet->sk.sk_tsflags; + ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags); ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if); ipcm->addr = inet->inet_saddr; ipcm->protocol = inet->inet_num; @@ -257,7 +258,7 @@ static inline u8 ip_sendmsg_scope(const struct inet_sock *inet, static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet) { - return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos); + return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos)); } /* datagram.c */ @@ -348,8 +349,14 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o } \ } -void inet_get_local_port_range(const struct net *net, int *low, int *high); -void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high); +static inline void inet_get_local_port_range(const struct net *net, int *low, int *high) +{ + u32 range = READ_ONCE(net->ipv4.ip_local_ports.range); + + *low = range & 0xffff; + *high = range >> 16; +} +bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high); #ifdef CONFIG_SYSCTL static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port) @@ -433,19 +440,22 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) static inline bool ip_sk_accept_pmtu(const struct sock *sk) { - return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE && - inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT; + u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); + + return pmtudisc != IP_PMTUDISC_INTERFACE && + pmtudisc != IP_PMTUDISC_OMIT; } static inline bool ip_sk_use_pmtu(const struct sock *sk) { - return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE; + return READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE; } static inline bool ip_sk_ignore_df(const struct sock *sk) { - return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO || - inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT; + u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); + + return pmtudisc < IP_PMTUDISC_DO || pmtudisc == IP_PMTUDISC_OMIT; } static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, @@ -757,7 +767,7 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev); * Functions provided by ip_sockglue.c */ -void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); +void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst); void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, int tlen, int offset); int 
ip_cmsg_send(struct sock *sk, struct msghdr *msg, diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index c9ff23cf313e..9ba6413fd2e3 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -179,9 +179,6 @@ struct fib6_info { refcount_t fib6_ref; unsigned long expires; - - struct hlist_node gc_link; - struct dst_metrics *fib6_metrics; #define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1] @@ -250,6 +247,19 @@ static inline bool fib6_requires_src(const struct fib6_info *rt) return rt->fib6_src.plen > 0; } +static inline void fib6_clean_expires(struct fib6_info *f6i) +{ + f6i->fib6_flags &= ~RTF_EXPIRES; + f6i->expires = 0; +} + +static inline void fib6_set_expires(struct fib6_info *f6i, + unsigned long expires) +{ + f6i->expires = expires; + f6i->fib6_flags |= RTF_EXPIRES; +} + static inline bool fib6_check_expired(const struct fib6_info *f6i) { if (f6i->fib6_flags & RTF_EXPIRES) @@ -257,11 +267,6 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i) return false; } -static inline bool fib6_has_expires(const struct fib6_info *f6i) -{ - return f6i->fib6_flags & RTF_EXPIRES; -} - /* Function to safely get fn->fn_sernum for passed in rt * and store result in passed in cookie. * Return true if we can get cookie safely @@ -383,7 +388,6 @@ struct fib6_table { struct inet_peer_base tb6_peers; unsigned int flags; unsigned int fib_seq; - struct hlist_head tb6_gc_hlist; /* GC candidates */ #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) }; @@ -500,48 +504,6 @@ void fib6_gc_cleanup(void); int fib6_init(void); -/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be - * NULL. - */ -static inline void fib6_set_expires_locked(struct fib6_info *f6i, - unsigned long expires) -{ - struct fib6_table *tb6; - - tb6 = f6i->fib6_table; - f6i->expires = expires; - if (tb6 && !fib6_has_expires(f6i)) - hlist_add_head(&f6i->gc_link, &tb6->tb6_gc_hlist); - f6i->fib6_flags |= RTF_EXPIRES; -} - -/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be - * NULL. If fib6_table is NULL, the fib6_info will no be inserted into the - * list of GC candidates until it is inserted into a table. 
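The simplified fib6_set_expires()/fib6_clean_expires() pair above now only toggles RTF_EXPIRES and the expiry timestamp, with no GC-list bookkeeping. A minimal caller sketch (hypothetical helper; the infinite-lifetime convention from RA processing is an assumption):

static void example_update_route_lifetime(struct fib6_info *f6i, u32 lifetime_sec)
{
        /* 0xffffffff is the "infinite lifetime" convention in RAs */
        if (lifetime_sec == 0xffffffff)
                fib6_clean_expires(f6i);
        else
                fib6_set_expires(f6i, jiffies + (unsigned long)lifetime_sec * HZ);
}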
- */ -static inline void fib6_set_expires(struct fib6_info *f6i, - unsigned long expires) -{ - spin_lock_bh(&f6i->fib6_table->tb6_lock); - fib6_set_expires_locked(f6i, expires); - spin_unlock_bh(&f6i->fib6_table->tb6_lock); -} - -static inline void fib6_clean_expires_locked(struct fib6_info *f6i) -{ - if (fib6_has_expires(f6i)) - hlist_del_init(&f6i->gc_link); - f6i->fib6_flags &= ~RTF_EXPIRES; - f6i->expires = 0; -} - -static inline void fib6_clean_expires(struct fib6_info *f6i) -{ - spin_lock_bh(&f6i->fib6_table->tb6_lock); - fib6_clean_expires_locked(f6i); - spin_unlock_bh(&f6i->fib6_table->tb6_lock); -} - struct ipv6_route_iter { struct seq_net_private p; struct fib6_walker w; @@ -642,7 +604,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net, if (!net->ipv6.fib6_rules_require_fldissect) return false; - skb_flow_dissect_flow_keys(skb, flkeys, flag); + memset(flkeys, 0, sizeof(*flkeys)); + __skb_flow_dissect(net, skb, &flow_keys_dissector, + flkeys, NULL, 0, 0, 0, flag); + fl6->fl6_sport = flkeys->ports.src; fl6->fl6_dport = flkeys->ports.dst; fl6->flowi6_proto = flkeys->basic.ip_proto; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index b32539bb0fb0..28b065790261 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -53,13 +53,12 @@ struct route_info { */ static inline int rt6_srcprefs2flags(unsigned int srcprefs) { - /* No need to bitmask because srcprefs have only 3 bits. */ - return srcprefs << 3; + return (srcprefs & IPV6_PREFER_SRC_MASK) << 3; } static inline unsigned int rt6_flags2srcprefs(int flags) { - return (flags >> 3) & 7; + return (flags >> 3) & IPV6_PREFER_SRC_MASK; } static inline bool rt6_need_strict(const struct in6_addr *daddr) @@ -266,7 +265,7 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb) const struct dst_entry *dst = skb_dst(skb); unsigned int mtu; - if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) { + if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) { mtu = READ_ONCE(dst->dev->mtu); mtu -= lwtunnel_headroom(dst->lwtstate, mtu); } else { @@ -277,14 +276,18 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb) static inline bool ip6_sk_accept_pmtu(const struct sock *sk) { - return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE && - inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT; + u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc); + + return pmtudisc != IPV6_PMTUDISC_INTERFACE && + pmtudisc != IPV6_PMTUDISC_OMIT; } static inline bool ip6_sk_ignore_df(const struct sock *sk) { - return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO || - inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; + u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc); + + return pmtudisc < IPV6_PMTUDISC_DO || + pmtudisc == IPV6_PMTUDISC_OMIT; } static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a378eff827c7..d4667b7797e3 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -154,9 +154,10 @@ struct fib_info { int fib_nhs; bool fib_nh_is_v6; bool nh_updated; + bool pfsrc_removed; struct nexthop *nh; struct rcu_head rcu; - struct fib_nh fib_nh[]; + struct fib_nh fib_nh[] __counted_by(fib_nhs); }; @@ -418,7 +419,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net, if (!net->ipv4.fib_rules_require_fldissect) return false; - skb_flow_dissect_flow_keys(skb, flkeys, flag); + memset(flkeys, 0, sizeof(*flkeys)); + __skb_flow_dissect(net, skb, &flow_keys_dissector, + flkeys, NULL, 0, 0, 0, flag); + 
fl4->fl4_sport = flkeys->ports.src; fl4->fl4_dport = flkeys->ports.dst; fl4->flowi4_proto = flkeys->basic.ip_proto; diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index e8750b4ef7e1..2d746f4c9a0a 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -416,6 +416,17 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, return 0; } +static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph, + const struct sk_buff *skb) +{ + __be16 payload_protocol = skb_protocol(skb, true); + + if (payload_protocol == htons(ETH_P_IPV6)) + return ip6_flowlabel((const struct ipv6hdr *)iph); + else + return 0; +} + static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph, const struct sk_buff *skb) { @@ -483,15 +494,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) u64_stats_inc(&tstats->tx_packets); u64_stats_update_end(&tstats->syncp); put_cpu_ptr(tstats); + return; + } + + if (pkt_len < 0) { + DEV_STATS_INC(dev, tx_errors); + DEV_STATS_INC(dev, tx_aborted_errors); } else { - struct net_device_stats *err_stats = &dev->stats; - - if (pkt_len < 0) { - err_stats->tx_errors++; - err_stats->tx_aborted_errors++; - } else { - err_stats->tx_dropped++; - } + DEV_STATS_INC(dev, tx_dropped); } } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 0675be0f3fa0..cf25ea21d770 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -373,12 +373,12 @@ static inline void ipcm6_init(struct ipcm6_cookie *ipc6) } static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, - const struct ipv6_pinfo *np) + const struct sock *sk) { *ipc6 = (struct ipcm6_cookie) { .hlimit = -1, - .tclass = np->tclass, - .dontfrag = np->dontfrag, + .tclass = inet6_sk(sk)->tclass, + .dontfrag = inet6_test_bit(DONTFRAG, sk), }; } @@ -428,7 +428,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags); int ip6_flowlabel_init(void); void ip6_flowlabel_cleanup(void); -bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np); +bool ip6_autoflowlabel(struct net *net, const struct sock *sk); static inline void fl6_sock_release(struct ip6_flowlabel *fl) { @@ -909,9 +909,9 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, int hlimit; if (ipv6_addr_is_multicast(&fl6->daddr)) - hlimit = np->mcast_hops; + hlimit = READ_ONCE(np->mcast_hops); else - hlimit = np->hop_limit; + hlimit = READ_ONCE(np->hop_limit); if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); return hlimit; @@ -1128,12 +1128,6 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool connected); -struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb, - struct net_device *dev, - struct net *net, struct socket *sock, - struct in6_addr *saddr, - const struct ip_tunnel_info *info, - u8 protocol, bool use_cache); struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *orig_dst); @@ -1298,15 +1292,16 @@ static inline int ip6_sock_set_v6only(struct sock *sk) static inline void ip6_sock_set_recverr(struct sock *sk) { - lock_sock(sk); - inet6_sk(sk)->recverr = true; - release_sock(sk); + inet6_set_bit(RECVERR6, sk); } -static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val) +#define IPV6_PREFER_SRC_MASK (IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBLIC | \ + IPV6_PREFER_SRC_COA) + +static inline int ip6_sock_set_addr_preferences(struct sock *sk, int 
val) { + unsigned int prefmask = ~IPV6_PREFER_SRC_MASK; unsigned int pref = 0; - unsigned int prefmask = ~0; /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ switch (val & (IPV6_PREFER_SRC_PUBLIC | @@ -1356,20 +1351,11 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val) return -EINVAL; } - inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref; + WRITE_ONCE(inet6_sk(sk)->srcprefs, + (READ_ONCE(inet6_sk(sk)->srcprefs) & prefmask) | pref); return 0; } -static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val) -{ - int ret; - - lock_sock(sk); - ret = __ip6_sock_set_addr_preferences(sk, val); - release_sock(sk); - return ret; -} - static inline void ip6_sock_set_recvpktinfo(struct sock *sk) { lock_sock(sk); diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index c48186bf4737..485c39a89866 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -60,6 +60,9 @@ struct ipv6_stub { #if IS_ENABLED(CONFIG_XFRM) void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu); int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb); + struct sk_buff *(*xfrm6_gro_udp_encap_rcv)(struct sock *sk, + struct list_head *head, + struct sk_buff *skb); int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); #endif @@ -85,6 +88,11 @@ struct ipv6_bpf_stub { sockptr_t optval, unsigned int optlen); int (*ipv6_getsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, sockptr_t optlen); + int (*ipv6_dev_get_saddr)(struct net *net, + const struct net_device *dst_dev, + const struct in6_addr *daddr, + unsigned int prefs, + struct in6_addr *saddr); }; extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h index f9e88401d7da..8b2055d64a6b 100644 --- a/include/net/iucv/iucv.h +++ b/include/net/iucv/iucv.h @@ -80,7 +80,7 @@ struct iucv_array { u32 length; } __attribute__ ((aligned (8))); -extern struct bus_type iucv_bus; +extern const struct bus_type iucv_bus; extern struct device *iucv_root; /* @@ -489,7 +489,7 @@ struct iucv_interface { int (*path_sever)(struct iucv_path *path, u8 userdata[16]); int (*iucv_register)(struct iucv_handler *handler, int smp); void (*iucv_unregister)(struct iucv_handler *handler, int smp); - struct bus_type *bus; + const struct bus_type *bus; struct device *root; }; diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 7e73f8e5e497..1d55ba7c45be 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, */ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) { - if (skb->protocol == htons(ETH_P_802_2)) - memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); + memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); } /** @@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) */ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da) { - if (skb->protocol == htons(ETH_P_802_2)) - memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); + memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); } /** diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 7c707358d15c..d400fe2e8668 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -79,7 +79,7 @@ * helpers for sanity checking. Drivers must ensure all work added onto the * mac80211 workqueue should be cancelled on the driver stop() callback. 
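For the workqueue rule spelled out above, a minimal driver-side sketch (hypothetical driver; the example_* names are illustrative): work queued with ieee80211_queue_work() is cancelled in the stop() callback.

struct example_priv {
        struct ieee80211_hw *hw;
        struct work_struct housekeeping_work;
};

static void example_schedule_housekeeping(struct example_priv *priv)
{
        /* the work runs on the mac80211 workqueue; it must not take the RTNL */
        ieee80211_queue_work(priv->hw, &priv->housekeeping_work);
}

static void example_stop(struct ieee80211_hw *hw)
{
        struct example_priv *priv = hw->priv;

        /* every work item the driver queued must be cancelled here */
        cancel_work_sync(&priv->housekeeping_work);
}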
* - * mac80211 will flushed the workqueue upon interface removal and during + * mac80211 will flush the workqueue upon interface removal and during * suspend. * * All work performed on the mac80211 workqueue must not acquire the RTNL lock. @@ -138,7 +138,7 @@ * field to the frame RX timestamp and report the ack TX timestamp in the * ieee80211_rx_status struct. * - * Similarly, To report hardware timestamps for Timing Measurement or Fine + * Similarly, to report hardware timestamps for Timing Measurement or Fine * Timing Measurement frame TX, the driver should set the SKB's hwtstamp field * to the frame TX timestamp and report the ack RX timestamp in the * ieee80211_tx_status struct. @@ -341,6 +341,7 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response * status changed. * @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed. + * @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed. */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -376,6 +377,7 @@ enum ieee80211_bss_change { BSS_CHANGED_FILS_DISCOVERY = 1<<30, BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31, BSS_CHANGED_EHT_PUNCTURING = BIT_ULL(32), + BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33), /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -474,9 +476,9 @@ struct ieee80211_ba_event { /** * struct ieee80211_event - event to be sent to the driver * @type: The event itself. See &enum ieee80211_event_type. - * @rssi: relevant if &type is %RSSI_EVENT - * @mlme: relevant if &type is %AUTH_EVENT - * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT + * @u.rssi: relevant if &type is %RSSI_EVENT + * @u.mlme: relevant if &type is %AUTH_EVENT + * @u.ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT * @u:union holding the fields above */ struct ieee80211_event { @@ -539,8 +541,6 @@ struct ieee80211_fils_discovery { * @link_id: link ID, or 0 for non-MLO * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE * @uora_exists: is the UORA element advertised by AP - * @ack_enabled: indicates support to receive a multi-TID that solicits either - * ACK, BACK or both * @uora_ocw_range: UORA element's OCW Range field * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @he_support: does this BSS support HE @@ -643,9 +643,7 @@ struct ieee80211_fils_discovery { * @pwr_reduction: power constraint of BSS. * @eht_support: does this BSS support EHT * @eht_puncturing: bitmap to indicate which channels are punctured in this BSS - * @csa_active: marks whether a channel switch is going on. Internally it is - * write-protected by sdata_lock and local->mtx so holding either is fine - * for read access. + * @csa_active: marks whether a channel switch is going on. * @csa_punct_bitmap: new puncturing bitmap for channel switch * @mu_mimo_owner: indicates interface owns MU-MIMO capability * @chanctx_conf: The channel context this interface is assigned to, or %NULL @@ -653,9 +651,7 @@ struct ieee80211_fils_discovery { * path needing to access it; even though the netdev carrier will always * be off when it is %NULL there can still be races and packets could be * processed after it switches back to %NULL. - * @color_change_active: marks whether a color change is ongoing. Internally it is - * write-protected by sdata_lock and local->mtx so holding either is fine - * for read access. + * @color_change_active: marks whether a color change is ongoing. * @color_change_color: the bss color that will be used after the change. 
* @ht_ldpc: in AP mode, indicates interface has HT LDPC capability. * @vht_ldpc: in AP mode, indicates interface has VHT LDPC capability. @@ -1082,6 +1078,11 @@ struct ieee80211_tx_rate { #define IEEE80211_MAX_TX_RETRY 31 +static inline bool ieee80211_rate_valid(struct ieee80211_tx_rate *rate) +{ + return rate->idx >= 0 && rate->count > 0; +} + static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate, u8 mcs, u8 nss) { @@ -1115,7 +1116,9 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate) * not valid if the interface is an MLD since we won't know which * link the frame will be transmitted on * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC - * @ack_frame_id: internal frame ID for TX status, used internally + * @status_data: internal data for TX status handling, assigned privately, + * see also &enum ieee80211_status_data for the internal documentation + * @status_data_idr: indicates status data is IDR allocated ID for ack frame * @tx_time_est: TX time estimate in units of 4us, used internally * @control: union part for control data * @control.rates: TX rates array to try @@ -1145,20 +1148,16 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate) * @ack: union part for pure ACK data * @ack.cookie: cookie for the ACK * @driver_data: array of driver_data pointers - * @ampdu_ack_len: number of acked aggregated frames. - * relevant only if IEEE80211_TX_STAT_AMPDU was set. - * @ampdu_len: number of aggregated frames. - * relevant only if IEEE80211_TX_STAT_AMPDU was set. - * @ack_signal: signal strength of the ACK frame */ struct ieee80211_tx_info { /* common information */ u32 flags; u32 band:3, - ack_frame_id:13, + status_data_idr:1, + status_data:13, hw_queue:4, tx_time_est:10; - /* 2 free bits */ + /* 1 free bit */ union { struct { @@ -1172,7 +1171,11 @@ struct ieee80211_tx_info { u8 use_cts_prot:1; u8 short_preamble:1; u8 skip_table:1; - /* 2 bytes free */ + + /* for injection only (bitmap) */ + u8 antennas:2; + + /* 14 bits free */ }; /* only needed before rate control */ unsigned long jiffies; @@ -1352,6 +1355,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * the frame. * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on * the frame. + * @RX_FLAG_MACTIME: The timestamp passed in the RX status (@mactime + * field) is valid if this field is non-zero, and the position + * where the timestamp was sampled depends on the value. * @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime * field) is valid and contains the time the first symbol of the MPDU * was received. This is useful in monitor mode and for proper IBSS @@ -1361,6 +1367,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * (including FCS) was received. * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime * field) is valid and contains the time the SYNC preamble was received. + * @RX_FLAG_MACTIME_IS_RTAP_TS64: The timestamp passed in the RX status @mactime + * is only for use in the radiotap timestamp header, not otherwise a valid + * @mactime value. Note this is a separate flag so that we continue to see + * %RX_FLAG_MACTIME as unset. Also note that in this case the timestamp is + * reported to be 64 bits wide, not just 32. * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present. 
* Valid only for data frames (mainly A-MPDU) * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference @@ -1431,12 +1442,12 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), RX_FLAG_DECRYPTED = BIT(1), - RX_FLAG_MACTIME_PLCP_START = BIT(2), + RX_FLAG_ONLY_MONITOR = BIT(2), RX_FLAG_MMIC_STRIPPED = BIT(3), RX_FLAG_IV_STRIPPED = BIT(4), RX_FLAG_FAILED_FCS_CRC = BIT(5), RX_FLAG_FAILED_PLCP_CRC = BIT(6), - RX_FLAG_MACTIME_START = BIT(7), + RX_FLAG_MACTIME_IS_RTAP_TS64 = BIT(7), RX_FLAG_NO_SIGNAL_VAL = BIT(8), RX_FLAG_AMPDU_DETAILS = BIT(9), RX_FLAG_PN_VALIDATED = BIT(10), @@ -1445,8 +1456,10 @@ enum mac80211_rx_flags { RX_FLAG_AMPDU_IS_LAST = BIT(13), RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14), RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15), - RX_FLAG_MACTIME_END = BIT(16), - RX_FLAG_ONLY_MONITOR = BIT(17), + RX_FLAG_MACTIME = BIT(16) | BIT(17), + RX_FLAG_MACTIME_PLCP_START = 1 << 16, + RX_FLAG_MACTIME_START = 2 << 16, + RX_FLAG_MACTIME_END = 3 << 16, RX_FLAG_SKIP_MONITOR = BIT(18), RX_FLAG_AMSDU_MORE = BIT(19), RX_FLAG_RADIOTAP_TLV_AT_END = BIT(20), @@ -1757,15 +1770,15 @@ struct ieee80211_channel_switch { * @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes * and send P2P_PS notification to the driver if NOA changed, even * this is not pure P2P vif. - * @IEEE80211_VIF_DISABLE_SMPS_OVERRIDE: disable user configuration of - * SMPS mode via debugfs. + * @IEEE80211_VIF_EML_ACTIVE: The driver indicates that EML operation is + * enabled for the interface. */ enum ieee80211_vif_flags { IEEE80211_VIF_BEACON_FILTER = BIT(0), IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1), IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2), IEEE80211_VIF_GET_NOA_UPDATE = BIT(3), - IEEE80211_VIF_DISABLE_SMPS_OVERRIDE = BIT(4), + IEEE80211_VIF_EML_ACTIVE = BIT(4), }; @@ -1938,7 +1951,7 @@ static inline bool ieee80211_vif_is_mld(const struct ieee80211_vif *vif) for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \ if ((!(vif)->active_links || \ (vif)->active_links & BIT(link_id)) && \ - (link = rcu_dereference((vif)->link_conf[link_id]))) + (link = link_conf_dereference_check(vif, link_id))) static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) { @@ -1971,22 +1984,18 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); */ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); -/** - * lockdep_vif_mutex_held - for lockdep checks on link poiners - * @vif: the interface to check - */ -static inline bool lockdep_vif_mutex_held(struct ieee80211_vif *vif) +static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif) { - return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->mtx); + return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->wiphy->mtx); } #define link_conf_dereference_protected(vif, link_id) \ rcu_dereference_protected((vif)->link_conf[link_id], \ - lockdep_vif_mutex_held(vif)) + lockdep_vif_wiphy_mutex_held(vif)) #define link_conf_dereference_check(vif, link_id) \ rcu_dereference_check((vif)->link_conf[link_id], \ - lockdep_vif_mutex_held(vif)) + lockdep_vif_wiphy_mutex_held(vif)) /** * enum ieee80211_key_flags - key flags @@ -2393,7 +2402,7 @@ static inline bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta) for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \ if ((!(vif)->active_links || \ (vif)->active_links & BIT(link_id)) && \ - ((link_sta) = link_sta_dereference_protected(sta, link_id))) + ((link_sta) = link_sta_dereference_check(sta, 
link_id))) /** * enum sta_notify_cmd - sta notify command @@ -2680,6 +2689,9 @@ struct ieee80211_txq { * @IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX: Hardware/driver handles transmitting * multicast frames on all links, mac80211 should not do that. * + * @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT + * and connecting with a lower bandwidth instead + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2737,6 +2749,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP, IEEE80211_HW_DETECTS_COLOR_COLLISION, IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX, + IEEE80211_HW_DISALLOW_PUNCTURING, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2825,8 +2838,6 @@ enum ieee80211_hw_flags { * the default is _GI | _BANDWIDTH. * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values. * - * @radiotap_he: HE radiotap validity flags - * * @radiotap_timestamp: Information for the radiotap timestamp field; if the * @units_pos member is set to a non-negative value then the timestamp * field will be added and populated from the &struct ieee80211_rx_status @@ -3056,7 +3067,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * The set_key() call for the %SET_KEY command should return 0 if * the key is now in use, -%EOPNOTSUPP or -%ENOSPC if it couldn't be * added; if you return 0 then hw_key_idx must be assigned to the - * hardware key index, you are free to use the full u8 range. + * hardware key index. You are free to use the full u8 range. * * Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is * set, mac80211 will not automatically fall back to software crypto if @@ -3066,7 +3077,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * When the cmd is %DISABLE_KEY then it must succeed. * * Note that it is permissible to not decrypt a frame even if a key - * for it has been uploaded to hardware, the stack will not make any + * for it has been uploaded to hardware. The stack will not make any * decision based on whether a key has been uploaded or not but rather * based on the receive flags. * @@ -3081,7 +3092,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * The update_tkip_key() call updates the driver with the new phase 1 key. * This happens every time the iv16 wraps around (every 65536 packets). The * set_key() call will happen only once for each key (unless the AP did - * rekeying), it will not include a valid phase 1 key. The valid phase 1 key is + * rekeying); it will not include a valid phase 1 key. The valid phase 1 key is * provided by update_tkip_key only. The trigger that makes mac80211 call this * handler is software decryption with wrap around of iv16. * @@ -3108,7 +3119,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * * mac80211 has support for various powersave implementations. * - * First, it can support hardware that handles all powersaving by itself, + * First, it can support hardware that handles all powersaving by itself; * such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware * flag. In that case, it will be told about the desired powersave mode * with the %IEEE80211_CONF_PS flag depending on the association status. @@ -3133,12 +3144,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still * required to pass up beacons. 
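A sketch of the %IEEE80211_HW_SUPPORTS_PS model described above (hypothetical driver; example_fw_set_ps() stands in for an assumed firmware command):

static void example_fw_set_ps(void *fw_priv, bool enable)
{
        /* hypothetical: tell the firmware to enter/leave powersave */
}

static int example_config(struct ieee80211_hw *hw, u32 changed)
{
        if (changed & IEEE80211_CONF_CHANGE_PS) {
                /* mac80211 reports the desired powersave state in conf.flags */
                bool enable = hw->conf.flags & IEEE80211_CONF_PS;

                example_fw_set_ps(hw->priv, enable);
        }
        return 0;
}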
The hardware is still required to handle * waking up for multicast traffic; if it cannot the driver must handle that - * as best as it can, mac80211 is too slow to do that. + * as best as it can; mac80211 is too slow to do that. * * Dynamic powersave is an extension to normal powersave in which the * hardware stays awake for a user-specified period of time after sending a * frame so that reply frames need not be buffered and therefore delayed to - * the next wakeup. It's compromise of getting good enough latency when + * the next wakeup. It's a compromise of getting good enough latency when * there's data traffic and still saving significant power in idle * periods. * @@ -3203,7 +3214,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * Note that change, for the sake of simplification, also includes information * elements appearing or disappearing from the beacon. * - * Some hardware supports an "ignore list" instead, just make sure nothing + * Some hardware supports an "ignore list" instead. Just make sure nothing * that was requested is on the ignore list, and include commonly changing * information element IDs in the ignore list, for example 11 (BSS load) and * the various vendor-assigned IEs with unknown contents (128, 129, 133-136, @@ -3214,7 +3225,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * In addition to these capabilities, hardware should support notifying the * host of changes in the beacon RSSI. This is relevant to implement roaming * when no traffic is flowing (when traffic is flowing we see the RSSI of - * the received data packets). This can consist in notifying the host when + * the received data packets). This can consist of notifying the host when * the RSSI changes significantly or when it drops below or rises above * configurable thresholds. In the future these thresholds will also be * configured by mac80211 (which gets them from userspace) to implement @@ -3361,8 +3372,8 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * period starts for any reason, @release_buffered_frames is called * with the number of frames to be released and which TIDs they are * to come from. In this case, the driver is responsible for setting - * the EOSP (for uAPSD) and MORE_DATA bits in the released frames, - * to help the @more_data parameter is passed to tell the driver if + * the EOSP (for uAPSD) and MORE_DATA bits in the released frames. + * To help, the @more_data parameter is passed to tell the driver if * there is more data on other TIDs -- the TIDs to release frames * from are ignored since mac80211 doesn't know how many frames the * buffers for those TIDs contain. @@ -3411,7 +3422,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * Additionally, the driver has to then use these HW queue IDs for the queue * management functions (ieee80211_stop_queue() et al.) * - * The driver is free to set up the queue mappings as needed, multiple virtual + * The driver is free to set up the queue mappings as needed; multiple virtual * interfaces may map to the same hardware queues if needed. The setup has to * happen during add_interface or change_interface callbacks. For example, a * driver supporting station+station and station+AP modes might decide to have @@ -3635,11 +3646,14 @@ enum ieee80211_reconfig_type { * @success: whether the frame exchange was successful, only * used with the mgd_complete_tx() method, and then only * valid for auth and (re)assoc.
+ * @link_id: the link id on which the frame will be TX'ed. + * Only used with the mgd_prepare_tx() method. */ struct ieee80211_prep_tx_info { u16 duration; u16 subtype; u8 success:1; + int link_id; }; /** @@ -3863,6 +3877,10 @@ struct ieee80211_prep_tx_info { * the station. See @sta_pre_rcu_remove if needed. * This callback can sleep. * + * @vif_add_debugfs: Drivers can use this callback to add a debugfs vif + * directory with its files. This callback should be within a + * CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep. + * * @link_add_debugfs: Drivers can use this callback to add debugfs files * when a link is added to a mac80211 vif. This callback should be within * a CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep. @@ -4067,11 +4085,15 @@ struct ieee80211_prep_tx_info { * This callback must be atomic. * * @get_et_sset_count: Ethtool API to get string-set count. + * Note that the wiphy mutex is not held for this callback since it's + * expected to return a static value. * * @get_et_stats: Ethtool API to get a set of u64 stats. * * @get_et_strings: Ethtool API to get a set of strings to describe stats * and perhaps other supported types of ethtool data-sets. + * Note that the wiphy mutex is not held for this callback since it's + * expected to return a static value. * * @mgd_prepare_tx: Prepare for transmitting a management frame for association * before associated. In multi-channel scenarios, a virtual interface is @@ -4250,6 +4272,8 @@ struct ieee80211_prep_tx_info { * disable background CAC/radar detection. * @net_fill_forward_path: Called from .ndo_fill_forward_path in order to * resolve a path for hardware flow offloading + * @can_activate_links: Checks if a specific active_links bitmap is + * supported by the driver. * @change_vif_links: Change the valid links on an interface, note that while * removing the old link information is still valid (link_conf pointer), * but may immediately disappear after the function returns. The old or @@ -4358,6 +4382,8 @@ struct ieee80211_ops { int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); #ifdef CONFIG_MAC80211_DEBUGFS + void (*vif_add_debugfs)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); void (*link_add_debugfs)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, @@ -4506,7 +4532,8 @@ struct ieee80211_ops { struct ieee80211_prep_tx_info *info); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + unsigned int link_id); int (*add_chanctx)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx); @@ -4544,7 +4571,8 @@ struct ieee80211_ops { struct ieee80211_channel_switch *ch_switch); int (*post_channel_switch)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); void (*abort_channel_switch)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw, @@ -4626,6 +4654,9 @@ struct ieee80211_ops { struct ieee80211_sta *sta, struct net_device_path_ctx *ctx, struct net_device_path *path); + bool (*can_activate_links)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 active_links); int (*change_vif_links)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 old_links, u16 new_links, @@ -4890,7 +4921,7 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw); * for a single hardware must be synchronized against each other. 
Calls to * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be * mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * This function must be called with BHs disabled and RCU read lock * @@ -4915,7 +4946,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta, * for a single hardware must be synchronized against each other. Calls to * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be * mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * This function must be called with BHs disabled. * @@ -4940,7 +4971,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta, * for a single hardware must be synchronized against each other. Calls to * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be * mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * In process context use instead ieee80211_rx_ni(). * @@ -4960,7 +4991,7 @@ static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) * * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not * be mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call @@ -4975,7 +5006,7 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb); * * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may * not be mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call @@ -5151,7 +5182,7 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw, struct ieee80211_tx_info *info); /** - * ieee80211_tx_status - transmit status callback + * ieee80211_tx_status_skb - transmit status callback * * Call this function for all transmitted frames after they have been * transmitted. It is permissible to not call this function for @@ -5166,13 +5197,13 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw, * @hw: the hardware the frame was transmitted by * @skb: the frame that was transmitted, owned by mac80211 after this call */ -void ieee80211_tx_status(struct ieee80211_hw *hw, - struct sk_buff *skb); +void ieee80211_tx_status_skb(struct ieee80211_hw *hw, + struct sk_buff *skb); /** * ieee80211_tx_status_ext - extended transmit status callback * - * This function can be used as a replacement for ieee80211_tx_status + * This function can be used as a replacement for ieee80211_tx_status_skb() * in drivers that may want to provide extra information that does not * fit into &struct ieee80211_tx_info.
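A TX-completion sketch using the renamed helper (hypothetical driver path, assuming the frame was ACKed):

static void example_tx_done(struct ieee80211_hw *hw, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        ieee80211_tx_info_clear_status(info);
        info->flags |= IEEE80211_TX_STAT_ACK;
        /* formerly ieee80211_tx_status(); mac80211 owns the skb afterwards */
        ieee80211_tx_status_skb(hw, skb);
}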
* @@ -5189,7 +5220,7 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, /** * ieee80211_tx_status_noskb - transmit status callback without skb * - * This function can be used as a replacement for ieee80211_tx_status + * This function can be used as a replacement for ieee80211_tx_status_skb() * in drivers that cannot reliably map tx status information back to * specific skbs. * @@ -5217,9 +5248,9 @@ static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, /** * ieee80211_tx_status_ni - transmit status callback (in process context) * - * Like ieee80211_tx_status() but can be called in process context. + * Like ieee80211_tx_status_skb() but can be called in process context. * - * Calls to this function, ieee80211_tx_status() and + * Calls to this function, ieee80211_tx_status_skb() and * ieee80211_tx_status_irqsafe() may not be mixed * for a single hardware. * @@ -5230,17 +5261,17 @@ static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw, struct sk_buff *skb) { local_bh_disable(); - ieee80211_tx_status(hw, skb); + ieee80211_tx_status_skb(hw, skb); local_bh_enable(); } /** * ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback * - * Like ieee80211_tx_status() but can be called in IRQ context + * Like ieee80211_tx_status_skb() but can be called in IRQ context * (internally defers to a tasklet.) * - * Calls to this function, ieee80211_tx_status() and + * Calls to this function, ieee80211_tx_status_skb() and * ieee80211_tx_status_ni() may not be mixed for a single hardware. * * @hw: the hardware the frame was transmitted by @@ -5788,12 +5819,11 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, * ieee80211_remove_key - remove the given key * @keyconf: the parameter passed with the set key * + * Context: Must be called with the wiphy mutex held. + * * Remove the given key. If the key was uploaded to the hardware at the * time this function is called, it is not deleted in the hardware but * instead assumed to have been removed already. - * - * Note that due to locking considerations this function can (currently) - * only be called during key iteration (ieee80211_iter_keys().) */ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf); @@ -6347,12 +6377,12 @@ ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq); * @iter: iterator function that will be called for each key * @iter_data: custom data to pass to the iterator function * + * Context: Must be called with wiphy mutex held; can sleep. + * * This function can be used to iterate all the keys known to * mac80211, even those that weren't previously programmed into * the device. This is intended for use in WoWLAN if the device - * needs reprogramming of the keys during suspend. Note that due - * to locking reasons, it is also only safe to call this at few - * spots since it must hold the RTNL and be able to sleep. + * needs reprogramming of the keys during suspend. * * The order in which the keys are iterated matches the order * in which they were originally installed and handed to the @@ -6542,11 +6572,14 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw); * ieee80211_chswitch_done - Complete channel switch process * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @success: make the channel switch successful or not + * @link_id: the link_id on which the switch was done. Ignored if success is + * false. * * Complete the channel switch post-process: set the new operational channel * and wake up the suspended queues. 
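With the updated prototype just below, completion becomes a one-liner (hypothetical driver call, assuming the switch succeeded on link 0):

        ieee80211_chswitch_done(vif, true, 0);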
*/ -void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); +void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success, + unsigned int link_id); /** * ieee80211_channel_switch_disconnect - disconnect due to channel switch error @@ -7242,7 +7275,7 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, * * This function is used to check whether given txq is allowed to transmit by * the airtime scheduler, and can be used by drivers to access the airtime - * fairness accounting without going using the scheduling order enfored by + * fairness accounting without using the scheduling order enforced by * next_txq(). * * Returns %true if the airtime scheduler thinks the TXQ should be allowed to @@ -7411,6 +7444,9 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb) * @vif: interface to set active links on * @active_links: the new active links bitmap * + * Context: Must be called with wiphy mutex held; may sleep; calls + * back into the driver. + * * This changes the active links on an interface. The interface * must be in client mode (in AP mode, all links are always active), * and @active_links must be a subset of the vif's valid_links. @@ -7418,6 +7454,7 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb) * If a link is switched off and another is switched on at the same * time (e.g. active_links going from 0x1 to 0x10) then you will get * a sequence of calls like + * * - change_vif_links(0x11) * - unassign_vif_chanctx(link_id=0) * - change_sta_links(0x11) for each affected STA (the AP) @@ -7427,10 +7464,6 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb) * - change_sta_links(0x10) for each affected STA (the AP) * - assign_vif_chanctx(link_id=4) * - change_vif_links(0x10) - * - * Note: This function acquires some mac80211 locks and must not - * be called with any driver locks held that could cause a - * lock dependency inversion. Best call it without locks. 
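A caller sketch honoring the new wiphy-mutex context requirement stated above (hypothetical helper):

static int example_activate_only_link(struct ieee80211_vif *vif, unsigned int link_id)
{
        struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
        int ret;

        wiphy_lock(wiphy);
        ret = ieee80211_set_active_links(vif, BIT(link_id));
        wiphy_unlock(wiphy);

        return ret;
}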
*/ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links); diff --git a/include/net/macsec.h b/include/net/macsec.h index 75a6f4863c83..dbd22180cc5c 100644 --- a/include/net/macsec.h +++ b/include/net/macsec.h @@ -247,6 +247,23 @@ struct macsec_secy { /** * struct macsec_context - MACsec context for hardware offloading + * @netdev: a valid pointer to a struct net_device if @offload == + * MACSEC_OFFLOAD_MAC + * @phydev: a valid pointer to a struct phy_device if @offload == + * MACSEC_OFFLOAD_PHY + * @offload: MACsec offload status + * @secy: pointer to a MACsec SecY + * @rx_sc: pointer to an RX SC + * @update_pn: when updating the SA, update the next PN + * @assoc_num: association number of the target SA + * @key: key of the target SA + * @rx_sa: pointer to an RX SA if an RX SA is added/updated/removed + * @tx_sa: pointer to a TX SA if a TX SA is added/updated/removed + * @tx_sc_stats: pointer to TX SC stats structure + * @tx_sa_stats: pointer to TX SA stats structure + * @rx_sc_stats: pointer to RX SC stats structure + * @rx_sa_stats: pointer to RX SA stats structure + * @dev_stats: pointer to dev stats structure */ struct macsec_context { union { @@ -258,6 +275,7 @@ struct macsec_context { struct macsec_secy *secy; struct macsec_rx_sc *rx_sc; struct { + bool update_pn; unsigned char assoc_num; u8 key[MACSEC_MAX_KEY_LEN]; union { @@ -276,6 +294,33 @@ struct macsec_context { /** * struct macsec_ops - MACsec offloading operations + * @mdo_dev_open: called when the MACsec interface transitions to the up state + * @mdo_dev_stop: called when the MACsec interface transitions to the down + * state + * @mdo_add_secy: called when a new SecY is added + * @mdo_upd_secy: called when the SecY flags are changed or the MAC address of + * the MACsec interface is changed + * @mdo_del_secy: called when the hw offload is disabled or the MACsec + * interface is removed + * @mdo_add_rxsc: called when a new RX SC is added + * @mdo_upd_rxsc: called when a certain RX SC is updated + * @mdo_del_rxsc: called when a certain RX SC is removed + * @mdo_add_rxsa: called when a new RX SA is added + * @mdo_upd_rxsa: called when a certain RX SA is updated + * @mdo_del_rxsa: called when a certain RX SA is removed + * @mdo_add_txsa: called when a new TX SA is added + * @mdo_upd_txsa: called when a certain TX SA is updated + * @mdo_del_txsa: called when a certain TX SA is removed + * @mdo_get_dev_stats: called when dev stats are read + * @mdo_get_tx_sc_stats: called when TX SC stats are read + * @mdo_get_tx_sa_stats: called when TX SA stats are read + * @mdo_get_rx_sc_stats: called when RX SC stats are read + * @mdo_get_rx_sa_stats: called when RX SA stats are read + * @mdo_insert_tx_tag: called to insert the TX tag + * @needed_headroom: number of bytes reserved at the beginning of the sk_buff + * for the TX tag + * @needed_tailroom: number of bytes reserved at the end of the sk_buff for the + * TX tag */ struct macsec_ops { /* Device wide */ @@ -302,6 +347,11 @@ struct macsec_ops { int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx); int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx); int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx); + /* Offload tag */ + int (*mdo_insert_tx_tag)(struct phy_device *phydev, + struct sk_buff *skb); + unsigned int needed_headroom; + unsigned int needed_tailroom; }; void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa); @@ -324,4 +374,9 @@ static inline void *macsec_netdev_priv(const struct net_device *dev) { return netdev_priv(dev); }
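A sketch of a PHY driver wiring up the new TX-tag hooks documented above (hypothetical driver; the 8-byte tag size and its layout are assumptions):

static int example_insert_tx_tag(struct phy_device *phydev, struct sk_buff *skb)
{
        /* headroom for the tag was reserved via needed_headroom below */
        u8 *tag = skb_push(skb, 8);

        memset(tag, 0, 8); /* a real driver fills in its vendor tag here */
        return 0;
}

static const struct macsec_ops example_macsec_ops = {
        .mdo_insert_tx_tag = example_insert_tx_tag,
        .needed_headroom = 8,
};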
+static inline u64 sci_to_cpu(sci_t sci) +{ + return be64_to_cpu((__force __be64)sci); +} + #endif /* _NET_MACSEC_H_ */ diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index 88b6ef7ce1a6..27684135bb4d 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -66,6 +66,7 @@ enum { GDMA_DEVICE_NONE = 0, GDMA_DEVICE_HWC = 1, GDMA_DEVICE_MANA = 2, + GDMA_DEVICE_MANA_IB = 3, }; struct gdma_resource { @@ -149,6 +150,7 @@ struct gdma_general_req { #define GDMA_MESSAGE_V1 1 #define GDMA_MESSAGE_V2 2 +#define GDMA_MESSAGE_V3 3 struct gdma_general_resp { struct gdma_resp_hdr hdr; @@ -293,6 +295,7 @@ struct gdma_queue { u32 head; u32 tail; + struct list_head entry; /* Extra fields specific to EQ/CQ. */ union { @@ -328,6 +331,7 @@ struct gdma_queue_spec { void *context; unsigned long log2_throttle_limit; + unsigned int msix_index; } eq; struct { @@ -344,7 +348,9 @@ struct gdma_queue_spec { struct gdma_irq_context { void (*handler)(void *arg); - void *arg; + /* Protect the eq_list */ + spinlock_t lock; + struct list_head eq_list; char name[MANA_IRQ_NAME_SZ]; }; @@ -355,7 +361,6 @@ struct gdma_context { unsigned int max_num_queues; unsigned int max_num_msix; unsigned int num_msix_usable; - struct gdma_resource msix_resource; struct gdma_irq_context *irq_contexts; /* L2 MTU */ @@ -387,6 +392,9 @@ struct gdma_context { /* Azure network adapter */ struct gdma_dev mana; + + /* Azure RDMA adapter */ + struct gdma_dev mana_ib; }; #define MAX_NUM_GDMA_DEVICES 4 diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h index 3d3b5c881bc1..158b125692c2 100644 --- a/include/net/mana/hw_channel.h +++ b/include/net/mana/hw_channel.h @@ -121,7 +121,7 @@ struct hwc_dma_buf { u32 gpa_mkey; u32 num_reqs; - struct hwc_work_request reqs[]; + struct hwc_work_request reqs[] __counted_by(num_reqs); }; typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id, diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 9f70b4332238..76147feb0d10 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -103,9 +103,10 @@ struct mana_txq { /* skb data and frags dma mappings */ struct mana_skb_head { - dma_addr_t dma_handle[MAX_SKB_FRAGS + 1]; + /* GSO pkts may have 2 SGEs for the linear part*/ + dma_addr_t dma_handle[MAX_SKB_FRAGS + 2]; - u32 size[MAX_SKB_FRAGS + 1]; + u32 size[MAX_SKB_FRAGS + 2]; }; #define MANA_HEADROOM sizeof(struct mana_skb_head) @@ -338,7 +339,7 @@ struct mana_rxq { /* MUST BE THE LAST MEMBER: * Each receive buffer has an associated mana_recv_buf_oob. 
*/ - struct mana_recv_buf_oob rx_oobs[]; + struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf); }; struct mana_tx_qp { @@ -352,6 +353,25 @@ struct mana_tx_qp { struct mana_ethtool_stats { u64 stop_queue; u64 wake_queue; + u64 hc_rx_discards_no_wqe; + u64 hc_rx_err_vport_disabled; + u64 hc_rx_bytes; + u64 hc_rx_ucast_pkts; + u64 hc_rx_ucast_bytes; + u64 hc_rx_bcast_pkts; + u64 hc_rx_bcast_bytes; + u64 hc_rx_mcast_pkts; + u64 hc_rx_mcast_bytes; + u64 hc_tx_err_gf_disabled; + u64 hc_tx_err_vport_disabled; + u64 hc_tx_err_inval_vportoffset_pkt; + u64 hc_tx_err_vlan_enforcement; + u64 hc_tx_err_eth_type_enforcement; + u64 hc_tx_err_sa_enforcement; + u64 hc_tx_err_sqpdid_enforcement; + u64 hc_tx_err_cqpdid_enforcement; + u64 hc_tx_err_mtu_violation; + u64 hc_tx_err_inval_oob; u64 hc_tx_bytes; u64 hc_tx_ucast_pkts; u64 hc_tx_ucast_bytes; @@ -359,6 +379,7 @@ struct mana_ethtool_stats { u64 hc_tx_bcast_bytes; u64 hc_tx_mcast_pkts; u64 hc_tx_mcast_bytes; + u64 hc_tx_err_gdma; u64 tx_cqe_err; u64 tx_cqe_unknown_type; u64 rx_coalesced_err; @@ -601,8 +622,8 @@ struct mana_query_gf_stat_resp { struct gdma_resp_hdr hdr; u64 reported_stats; /* rx errors/discards */ - u64 discard_rx_nowqe; - u64 err_rx_vport_disabled; + u64 rx_discards_nowqe; + u64 rx_err_vport_disabled; /* rx bytes/packets */ u64 hc_rx_bytes; u64 hc_rx_ucast_pkts; @@ -612,16 +633,16 @@ struct mana_query_gf_stat_resp { u64 hc_rx_mcast_pkts; u64 hc_rx_mcast_bytes; /* tx errors */ - u64 err_tx_gf_disabled; - u64 err_tx_vport_disabled; - u64 err_tx_inval_vport_offset_pkt; - u64 err_tx_vlan_enforcement; - u64 err_tx_ethtype_enforcement; - u64 err_tx_SA_enforecement; - u64 err_tx_SQPDID_enforcement; - u64 err_tx_CQPDID_enforcement; - u64 err_tx_mtu_violation; - u64 err_tx_inval_oob; + u64 tx_err_gf_disabled; + u64 tx_err_vport_disabled; + u64 tx_err_inval_vport_offset_pkt; + u64 tx_err_vlan_enforcement; + u64 tx_err_ethtype_enforcement; + u64 tx_err_SA_enforcement; + u64 tx_err_SQPDID_enforcement; + u64 tx_err_CQPDID_enforcement; + u64 tx_err_mtu_violation; + u64 tx_err_inval_oob; /* tx bytes/packets */ u64 hc_tx_bytes; u64 hc_tx_ucast_pkts; @@ -631,7 +652,7 @@ struct mana_query_gf_stat_resp { u64 hc_tx_mcast_pkts; u64 hc_tx_mcast_bytes; /* tx error */ - u64 err_tx_gdma; + u64 tx_err_gdma; }; /* HW DATA */ /* Configure vPort Rx Steering */ diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 6da68886fabb..0d28172193fa 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -162,7 +162,7 @@ struct neighbour { struct rcu_head rcu; struct net_device *dev; netdevice_tracker dev_tracker; - u8 primary_key[0]; + u8 primary_key[]; } __randomize_layout; struct neigh_ops { @@ -539,7 +539,7 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb, READ_ONCE(hh->hh_len)) return neigh_hh_output(hh, skb); - return n->output(n, skb); + return READ_ONCE(n->output)(n, skb); } static inline struct neighbour * diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index eb6cd43b1746..13b3a4e29fdb 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -368,21 +368,30 @@ static inline void put_net_track(struct net *net, netns_tracker *tracker) typedef struct { #ifdef CONFIG_NET_NS - struct net *net; + struct net __rcu *net; #endif } possible_net_t; static inline void write_pnet(possible_net_t *pnet, struct net *net) { #ifdef CONFIG_NET_NS - pnet->net = net; + rcu_assign_pointer(pnet->net, net); #endif } static inline struct net *read_pnet(const possible_net_t *pnet) { 
#ifdef CONFIG_NET_NS - return pnet->net; + return rcu_dereference_protected(pnet->net, true); +#else + return &init_net; +#endif +} + +static inline struct net *read_pnet_rcu(possible_net_t *pnet) +{ +#ifdef CONFIG_NET_NS + return rcu_dereference(pnet->net); #else return &init_net; #endif diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index d68b0a483431..8b8ed4e13d74 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -128,7 +128,7 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue, netdev_txq_completed_mb(txq, pkts, bytes); \ \ _res = -1; \ - if (pkts && likely(get_desc > start_thrs)) { \ + if (pkts && likely(get_desc >= start_thrs)) { \ _res = 1; \ if (unlikely(netif_tx_queue_stopped(txq)) && \ !(down_cond)) { \ diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h index cdcafb30d437..aa1716fb0e53 100644 --- a/include/net/netdev_rx_queue.h +++ b/include/net/netdev_rx_queue.h @@ -21,6 +21,10 @@ struct netdev_rx_queue { #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif + /* NAPI instance for the queue + * Readers and writers must hold RTNL + */ + struct napi_struct *napi; } ____cacheline_aligned_in_smp; /* diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 4085765c3370..cba3ccf03fcc 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -160,10 +160,6 @@ static inline struct net *nf_ct_net(const struct nf_conn *ct) return read_pnet(&ct->ct_net); } -/* Alter reply tuple (maybe alter helper). */ -void nf_conntrack_alter_reply(struct nf_conn *ct, - const struct nf_conntrack_tuple *newreply); - /* Is this tuple taken? (ignoring any belonging to the given conntrack). */ int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, @@ -284,6 +280,16 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb) return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; } +static inline void nf_conntrack_alter_reply(struct nf_conn *ct, + const struct nf_conntrack_tuple *newreply) +{ + /* Must be unconfirmed, so not in hash table yet */ + if (WARN_ON(nf_ct_is_confirmed(ct))) + return; + + ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; +} + #define nfct_time_stamp ((u32)(jiffies)) /* jiffies until ct expires, 0 if already expired */ diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h index 078d3c52c03f..e5f2f0b73a9a 100644 --- a/include/net/netfilter/nf_conntrack_act_ct.h +++ b/include/net/netfilter/nf_conntrack_act_ct.h @@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf #endif } -static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct) +static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo) +{ +#if IS_ENABLED(CONFIG_NET_ACT_CT) + struct nf_conn_act_ct_ext *act_ct_ext; + + act_ct_ext = nf_conn_act_ct_ext_find(ct); + if (dev_net(skb->dev) == &init_net && act_ct_ext) + act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex; +#endif +} + +static inline struct +nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo) { #if IS_ENABLED(CONFIG_NET_ACT_CT) struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT); @@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn * return act_ct; act_ct = 
nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC); + nf_conn_act_ct_ext_fill(skb, ct, ctinfo); return act_ct; #else return NULL; #endif } -static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo) -{ -#if IS_ENABLED(CONFIG_NET_ACT_CT) - struct nf_conn_act_ct_ext *act_ct_ext; - - act_ct_ext = nf_conn_act_ct_ext_find(ct); - if (dev_net(skb->dev) == &init_net && act_ct_ext) - act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex; -#endif -} - #endif /* _NF_CONNTRACK_ACT_CT_H */ diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index fcb19a4e8f2b..6903f72bcc15 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -39,7 +39,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) #ifdef CONFIG_NF_CONNTRACK_LABELS struct net *net = nf_ct_net(ct); - if (net->ct.labels_used == 0) + if (atomic_read(&net->ct.labels_used) == 0) return NULL; return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC); diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index d466e1a3b0b1..956c752ceb31 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -53,6 +53,7 @@ struct nf_flowtable_type { struct list_head list; int family; int (*init)(struct nf_flowtable *ft); + bool (*gc)(const struct flow_offload *flow); int (*setup)(struct nf_flowtable *ft, struct net_device *dev, enum flow_block_command cmd); @@ -61,6 +62,8 @@ struct nf_flowtable_type { enum flow_offload_tuple_dir dir, struct nf_flow_rule *flow_rule); void (*free)(struct nf_flowtable *ft); + void (*get)(struct nf_flowtable *ft); + void (*put)(struct nf_flowtable *ft); nf_hookfn *hook; struct module *owner; }; @@ -71,12 +74,13 @@ enum nf_flowtable_flags { }; struct nf_flowtable { - struct list_head list; - struct rhashtable rhashtable; - int priority; + unsigned int flags; /* readonly in datapath */ + int priority; /* control path (padding hole) */ + struct rhashtable rhashtable; /* datapath, read-mostly members come first */ + + struct list_head list; /* slowpath parts */ const struct nf_flowtable_type *type; struct delayed_work gc_work; - unsigned int flags; struct flow_block flow_block; struct rw_semaphore flow_block_lock; /* Guards flow_block */ possible_net_t net; @@ -239,6 +243,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table, } list_add_tail(&block_cb->list, &block->cb_list); + up_write(&flow_table->flow_block_lock); + + if (flow_table->type->get) + flow_table->type->get(flow_table); + return 0; unlock: up_write(&flow_table->flow_block_lock); @@ -261,6 +270,9 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, WARN_ON(true); } up_write(&flow_table->flow_block_lock); + + if (flow_table->type->put) + flow_table->type->put(flow_table); } void flow_offload_route_init(struct flow_offload *flow, diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index dd40c75011d2..510244cc0f8f 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -178,9 +178,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg) return *(__force __be32 *)sreg; } -static inline void nft_reg_store64(u32 *dreg, u64 val) +static inline void nft_reg_store64(u64 *dreg, u64 val) { - put_unaligned(val, (u64 *)dreg); + put_unaligned(val, dreg); } static inline u64 nft_reg_load64(const u32 *sreg) @@ -205,6 +205,7 @@ static 
inline void nft_data_copy(u32 *dst, const struct nft_data *src, * @nla: netlink attributes * @portid: netlink portID of the original message * @seq: netlink sequence number + * @flags: modifiers to new request * @family: protocol family * @level: depth of the chains * @report: notify via unicast netlink message @@ -274,11 +275,15 @@ struct nft_userdata { unsigned char data[]; }; +/* placeholder structure for opaque set element backend representation. */ +struct nft_elem_priv { }; + /** * struct nft_set_elem - generic representation of set elements * * @key: element key * @key_end: closing element key + * @data: element data * @priv: element private data and extensions */ struct nft_set_elem { @@ -294,9 +299,14 @@ struct nft_set_elem { u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)]; struct nft_data val; } data; - void *priv; + struct nft_elem_priv *priv; }; +static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv) +{ + return (void *)priv; +} + struct nft_set; struct nft_set_iter { u8 genmask; @@ -306,7 +316,7 @@ struct nft_set_iter { int (*fn)(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, - struct nft_set_elem *elem); + struct nft_elem_priv *elem_priv); }; /** @@ -317,10 +327,10 @@ struct nft_set_iter { * @dtype: data type * @dlen: data length * @objtype: object type - * @flags: flags * @size: number of set elements * @policy: set policy * @gc_int: garbage collector interval + * @timeout: element timeout * @field_len: length of each field in concatenation, bytes * @field_count: number of concatenated fields in element * @expr: set must support for expressions @@ -343,9 +353,9 @@ struct nft_set_desc { /** * enum nft_set_class - performance class * - * @NFT_LOOKUP_O_1: constant, O(1) - * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N) - * @NFT_LOOKUP_O_N: linear, O(N) + * @NFT_SET_CLASS_O_1: constant, O(1) + * @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N) + * @NFT_SET_CLASS_O_N: linear, O(N) */ enum nft_set_class { NFT_SET_CLASS_O_1, @@ -414,9 +424,13 @@ struct nft_set_ext; * @remove: remove element from set * @walk: iterate over all set elements * @get: get set elements + * @commit: commit set elements + * @abort: abort set elements * @privsize: function to return size of set private data + * @estimate: estimate the required memory size and the lookup complexity class * @init: initialize private data of new set instance * @destroy: destroy private data of set instance + * @gc_init: initialize garbage collection * @elemsize: element private size * * Operations lookup, update and delete have simpler interfaces, are faster @@ -430,7 +444,8 @@ struct nft_set_ops { const struct nft_set_ext **ext); bool (*update)(struct nft_set *set, const u32 *key, - void *(*new)(struct nft_set *, + struct nft_elem_priv * + (*new)(struct nft_set *, const struct nft_expr *, struct nft_regs *), const struct nft_expr *expr, @@ -442,27 +457,27 @@ struct nft_set_ops { int (*insert)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, - struct nft_set_ext **ext); + struct nft_elem_priv **priv); void (*activate)(const struct net *net, const struct nft_set *set, - const struct nft_set_elem *elem); - void * (*deactivate)(const struct net *net, + struct nft_elem_priv *elem_priv); + struct nft_elem_priv * (*deactivate)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem); - bool (*flush)(const struct net *net, + void (*flush)(const struct net *net, const struct nft_set *set, - void *priv); + struct nft_elem_priv *priv); 
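struct nft_elem_priv is deliberately empty: it is a type-safe tag that set backends embed in their private element layout, so the core can pass element handles around without knowing that layout. A hedged sketch of the intended backend pattern (struct my_elem and the flush body are invented, not a real backend):

/* Hypothetical backend element; the tag member is what the core sees. */
struct my_elem {
	struct nft_elem_priv	priv;	/* kept first so the cast is trivial */
	/* backend bookkeeping and extensions follow */
};

static void my_flush(const struct net *net, const struct nft_set *set,
		     struct nft_elem_priv *elem_priv)
{
	/* nft_elem_priv_cast() just strips the const qualifier */
	struct my_elem *elem = nft_elem_priv_cast(elem_priv);

	/* ... mark elem inactive in the next generation ... */
}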
void (*remove)(const struct net *net, const struct nft_set *set, - const struct nft_set_elem *elem); + struct nft_elem_priv *elem_priv); void (*walk)(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter); - void * (*get)(const struct net *net, + struct nft_elem_priv * (*get)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags); - void (*commit)(const struct nft_set *set); + void (*commit)(struct nft_set *set); void (*abort)(const struct nft_set *set); u64 (*privsize)(const struct nlattr * const nla[], const struct nft_set_desc *desc); @@ -531,13 +546,16 @@ struct nft_set_elem_expr { * @policy: set parameterization (see enum nft_set_policies) * @udlen: user data length * @udata: user data - * @expr: stateful expression + * @pending_update: list of pending update set element * @ops: set ops * @flags: set flags * @dead: set will be freed, never cleared * @genmask: generation mask * @klen: key length * @dlen: data length + * @num_exprs: numbers of exprs + * @exprs: stateful expression + * @catchall_list: list of catch-all set element * @data: private set data */ struct nft_set { @@ -683,6 +701,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[]; * * @len: length of extension area * @offset: offsets of individual extension types + * @ext_len: length of the expected extension(used to sanity check) */ struct nft_set_ext_tmpl { u16 len; @@ -789,16 +808,22 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS); } -static inline bool nft_set_elem_expired(const struct nft_set_ext *ext) +static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext, + u64 tstamp) { return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) && - time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext)); + time_after_eq64(tstamp, *nft_set_ext_expiration(ext)); +} + +static inline bool nft_set_elem_expired(const struct nft_set_ext *ext) +{ + return __nft_set_elem_expired(ext, get_jiffies_64()); } static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set, - void *elem) + const struct nft_elem_priv *elem_priv) { - return elem + set->ops->elemsize; + return (void *)elem_priv + set->ops->elemsize; } static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext) @@ -810,16 +835,19 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx, const struct nft_set *set, const struct nlattr *attr); -void *nft_set_elem_init(const struct nft_set *set, - const struct nft_set_ext_tmpl *tmpl, - const u32 *key, const u32 *key_end, const u32 *data, - u64 timeout, u64 expiration, gfp_t gfp); +struct nft_elem_priv *nft_set_elem_init(const struct nft_set *set, + const struct nft_set_ext_tmpl *tmpl, + const u32 *key, const u32 *key_end, + const u32 *data, + u64 timeout, u64 expiration, gfp_t gfp); int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set, struct nft_expr *expr_array[]); -void nft_set_elem_destroy(const struct nft_set *set, void *elem, +void nft_set_elem_destroy(const struct nft_set *set, + const struct nft_elem_priv *elem_priv, bool destroy_expr); void nf_tables_set_elem_destroy(const struct nft_ctx *ctx, - const struct nft_set *set, void *elem); + const struct nft_set *set, + const struct nft_elem_priv *elem_priv); struct nft_expr_ops; /** @@ -828,6 +856,7 @@ struct nft_expr_ops; * @select_ops: function to select nft_expr_ops * @release_ops: release nft_expr_ops * @ops: default ops, used when no 
select_ops functions is present + * @inner_ops: inner ops, used for inner packet operation * @list: used internally * @name: Identifier * @owner: module reference @@ -869,14 +898,22 @@ struct nft_offload_ctx; * struct nft_expr_ops - nf_tables expression operations * * @eval: Expression evaluation function + * @clone: Expression clone function * @size: full expression size, including private data size * @init: initialization function * @activate: activate expression in the next generation * @deactivate: deactivate expression in next generation * @destroy: destruction function, called after synchronize_rcu + * @destroy_clone: destruction clone function * @dump: function to dump parameters - * @type: expression type * @validate: validate expression, called during loop detection + * @reduce: reduce expression + * @gc: garbage collection expression + * @offload: hardware offload expression + * @offload_action: function to report true/false to allocate one slot or not in the flow + * offload array + * @offload_stats: function to synchronize hardware stats via updating the counter expression + * @type: expression type * @data: extra data to attach to this expression operation */ struct nft_expr_ops { @@ -1029,14 +1066,21 @@ struct nft_rule_blob { /** * struct nft_chain - nf_tables chain * + * @blob_gen_0: rule blob pointer to the current generation + * @blob_gen_1: rule blob pointer to the future generation * @rules: list of rules in the chain * @list: used internally * @rhlhead: used internally * @table: table that this chain belongs to * @handle: chain handle * @use: number of jump references to this chain - * @flags: bitmask of enum nft_chain_flags + * @flags: bitmask of enum NFTA_CHAIN_FLAGS + * @bound: bind or not + * @genmask: generation mask * @name: name of the chain + * @udlen: user data length + * @udata: user data in the chain + * @blob_next: rule blob pointer to the next in the chain */ struct nft_chain { struct nft_rule_blob __rcu *blob_gen_0; @@ -1061,7 +1105,7 @@ struct nft_chain { int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain); int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, - struct nft_set_elem *elem); + struct nft_elem_priv *elem_priv); int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set); int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain); void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain); @@ -1134,6 +1178,7 @@ struct nft_hook { * @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family) * @type: chain type * @policy: default policy + * @flags: indicate the base chain disabled or not * @stats: per-cpu chain stats * @chain: the chain * @flow_block: flow block (for hardware offload) @@ -1198,10 +1243,13 @@ static inline void nft_use_inc_restore(u32 *use) * @hgenerator: handle generator state * @handle: table handle * @use: number of chain references to this table + * @family:address family * @flags: table flag (see enum nft_table_flags) * @genmask: generation mask - * @afinfo: address family info + * @nlpid: netlink port ID * @name: name of the table + * @udlen: length of the user data + * @udata: user data * @validate_state: internal, set when transaction adds jumps */ struct nft_table { @@ -1259,11 +1307,13 @@ struct nft_object_hash_key { * struct nft_object - nf_tables stateful object * * @list: table stateful object list node - * @key: keys that identify this object * @rhlhead: nft_objname_ht node + 
* @key: keys that identify this object * @genmask: generation mask * @use: number of references to this stateful object * @handle: unique object handle + * @udlen: length of user data + * @udata: user data * @ops: object operations * @data: object data, layout depends on type */ @@ -1307,6 +1357,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, * @type: stateful object numeric type * @owner: module owner * @maxattr: maximum netlink attribute + * @family: address family for AF-specific object types * @policy: netlink attribute policy */ struct nft_object_type { @@ -1316,6 +1367,7 @@ struct nft_object_type { struct list_head list; u32 type; unsigned int maxattr; + u8 family; struct module *owner; const struct nla_policy *policy; }; @@ -1329,6 +1381,7 @@ struct nft_object_type { * @destroy: release existing stateful object * @dump: netlink dump stateful object * @update: update stateful object + * @type: pointer to object type */ struct nft_object_ops { void (*eval)(struct nft_object *obj, @@ -1364,9 +1417,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type); * @genmask: generation mask * @use: number of references to this flow table * @handle: unique object handle - * @dev_name: array of device names + * @hook_list: hook list for hooks per net_device in flowtables * @data: rhashtable and garbage collector - * @ops: array of hooks */ struct nft_flowtable { struct list_head list; @@ -1635,14 +1687,14 @@ struct nft_trans_table { struct nft_trans_elem { struct nft_set *set; - struct nft_set_elem elem; + struct nft_elem_priv *elem_priv; bool bound; }; #define nft_trans_elem_set(trans) \ (((struct nft_trans_elem *)trans->data)->set) -#define nft_trans_elem(trans) \ - (((struct nft_trans_elem *)trans->data)->elem) +#define nft_trans_elem_priv(trans) \ + (((struct nft_trans_elem *)trans->data)->elem_priv) #define nft_trans_elem_set_bound(trans) \ (((struct nft_trans_elem *)trans->data)->bound) @@ -1682,8 +1734,8 @@ struct nft_trans_gc { struct net *net; struct nft_set *set; u32 seq; - u8 count; - void *priv[NFT_TRANS_GC_BATCHCOUNT]; + u16 count; + struct nft_elem_priv *priv[NFT_TRANS_GC_BATCHCOUNT]; struct rcu_head rcu; }; @@ -1700,12 +1752,13 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans); void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv); -struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc, - unsigned int gc_seq); +struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc, + unsigned int gc_seq); +struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc); void nft_setelem_data_deactivate(const struct net *net, const struct nft_set *set, - struct nft_set_elem *elem); + struct nft_elem_priv *elem_priv); int __init nft_chain_filter_init(void); void nft_chain_filter_fini(void); @@ -1732,6 +1785,7 @@ struct nftables_pernet { struct list_head notify_list; struct mutex commit_mutex; u64 table_handle; + u64 tstamp; unsigned int base_seq; unsigned int gc_seq; u8 validate_state; @@ -1744,6 +1798,11 @@ static inline struct nftables_pernet *nft_pernet(const struct net *net) return net_generic(net, nf_tables_net_id); } +static inline u64 nft_net_tstamp(const struct net *net) +{ + return nft_pernet(net)->tstamp; +} + #define __NFT_REDUCE_READONLY 1UL #define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h index 947973623dc7..60a7d0ce3080 100644 --- a/include/net/netfilter/nf_tables_ipv4.h +++ 
b/include/net/netfilter/nf_tables_ipv4.h @@ -30,7 +30,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt) return -1; len = iph_totlen(pkt->skb, iph); - thoff = iph->ihl * 4; + thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4); if (pkt->skb->len < len) return -1; else if (len < thoff) diff --git a/include/net/netkit.h b/include/net/netkit.h new file mode 100644 index 000000000000..9ec0163739f4 --- /dev/null +++ b/include/net/netkit.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Isovalent */ +#ifndef __NET_NETKIT_H +#define __NET_NETKIT_H + +#include <linux/bpf.h> + +#ifdef CONFIG_NETKIT +int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); +INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev)); +#else +static inline int netkit_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int netkit_link_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int netkit_prog_detach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int netkit_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} + +static inline struct net_device *netkit_peer_dev(struct net_device *dev) +{ + return NULL; +} +#endif /* CONFIG_NETKIT */ +#endif /* __NET_NETKIT_H */ diff --git a/include/net/netlink.h b/include/net/netlink.h index 8a7cd1170e1f..c19ff921b661 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -128,6 +128,8 @@ * nla_len(nla) length of attribute payload * * Attribute Payload Access for Basic Types: + * nla_get_uint(nla) get payload for a uint attribute + * nla_get_sint(nla) get payload for a sint attribute * nla_get_u8(nla) get payload for a u8 attribute * nla_get_u16(nla) get payload for a u16 attribute * nla_get_u32(nla) get payload for a u32 attribute @@ -183,6 +185,8 @@ enum { NLA_REJECT, NLA_BE16, NLA_BE32, + NLA_SINT, + NLA_UINT, __NLA_TYPE_MAX, }; @@ -229,6 +233,7 @@ enum nla_policy_validation { * nested header (or empty); len field is used if * nested_policy is also used, for the max attr * number in the nested policy. + * NLA_SINT, NLA_UINT, * NLA_U8, NLA_U16, * NLA_U32, NLA_U64, * NLA_S8, NLA_S16, @@ -260,12 +265,14 @@ enum nla_policy_validation { * while an array has the nested attributes at another * level down and the attribute types directly in the * nesting don't matter. + * NLA_UINT, * NLA_U8, * NLA_U16, * NLA_U32, * NLA_U64, * NLA_BE16, * NLA_BE32, + * NLA_SINT, * NLA_S8, * NLA_S16, * NLA_S32, @@ -280,6 +287,7 @@ enum nla_policy_validation { * or NLA_POLICY_FULL_RANGE_SIGNED() macros instead. * Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and * NLA_POLICY_RANGE() macros. + * NLA_UINT, * NLA_U8, * NLA_U16, * NLA_U32, @@ -288,6 +296,7 @@ enum nla_policy_validation { * to a struct netlink_range_validation that indicates * the min/max values. * Use NLA_POLICY_FULL_RANGE(). 
+ * NLA_SINT, * NLA_S8, * NLA_S16, * NLA_S32, @@ -351,8 +360,8 @@ struct nla_policy { const u32 mask; const char *reject_message; const struct nla_policy *nested_policy; - struct netlink_range_validation *range; - struct netlink_range_validation_signed *range_signed; + const struct netlink_range_validation *range; + const struct netlink_range_validation_signed *range_signed; struct { s16 min, max; }; @@ -377,9 +386,11 @@ struct nla_policy { #define __NLA_IS_UINT_TYPE(tp) \ (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || \ - tp == NLA_U64 || tp == NLA_BE16 || tp == NLA_BE32) + tp == NLA_U64 || tp == NLA_UINT || \ + tp == NLA_BE16 || tp == NLA_BE32) #define __NLA_IS_SINT_TYPE(tp) \ - (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64) + (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64 || \ + tp == NLA_SINT) #define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition)) #define NLA_ENSURE_UINT_TYPE(tp) \ @@ -1000,6 +1011,20 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags) } /** + * nlmsg_new_large - Allocate a new netlink message with non-contiguous + * physical memory + * @payload: size of the message payload + * + * The allocated skb is unable to have frag page for shinfo->frags*, + * as the NULL setting for skb->head in netlink_skb_destructor() will + * bypass most of the handling in skb_release_data() + */ +static inline struct sk_buff *nlmsg_new_large(size_t payload) +{ + return netlink_alloc_large_skb(nlmsg_total_size(payload), 0); +} + +/** * nlmsg_end - Finalize a netlink message * @skb: socket buffer the message is stored in * @nlh: netlink message header @@ -1062,21 +1087,29 @@ static inline void nlmsg_free(struct sk_buff *skb) } /** - * nlmsg_multicast - multicast a netlink message + * nlmsg_multicast_filtered - multicast a netlink message with filter function * @sk: netlink socket to spread messages to * @skb: netlink message as socket buffer * @portid: own netlink portid to avoid sending to yourself * @group: multicast group id * @flags: allocation flags + * @filter: filter function + * @filter_data: filter function private data + * + * Return: 0 on success, negative error code for failure. 
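Because NLA_UINT and NLA_SINT are folded into the existing uint/sint type classes above, the stock range-validation macros apply to them unchanged, and the now-const range pointers let the whole policy live in rodata. A hedged sketch (the MY_ATTR_* attribute set and the bounds are invented):

#include <net/netlink.h>

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_COUNTER,	/* hypothetical variable-width counter */
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct netlink_range_validation my_counter_range = {
	.min = 1,
	.max = 1ULL << 48,	/* wider than u32, so NLA_UINT earns its keep */
};

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_COUNTER] = NLA_POLICY_FULL_RANGE(NLA_UINT, &my_counter_range),
};

On the wire, nla_put_uint() (further below) emits four payload bytes when the value fits in a u32 and eight bytes otherwise; nla_get_uint() accepts either width on receive.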
*/ -static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb, - u32 portid, unsigned int group, gfp_t flags) +static inline int nlmsg_multicast_filtered(struct sock *sk, struct sk_buff *skb, + u32 portid, unsigned int group, + gfp_t flags, + netlink_filter_fn filter, + void *filter_data) { int err; NETLINK_CB(skb).dst_group = group; - err = netlink_broadcast(sk, skb, portid, group, flags); + err = netlink_broadcast_filtered(sk, skb, portid, group, flags, + filter, filter_data); if (err > 0) err = 0; @@ -1084,6 +1117,21 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb, } /** + * nlmsg_multicast - multicast a netlink message + * @sk: netlink socket to spread messages to + * @skb: netlink message as socket buffer + * @portid: own netlink portid to avoid sending to yourself + * @group: multicast group id + * @flags: allocation flags + */ +static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb, + u32 portid, unsigned int group, gfp_t flags) +{ + return nlmsg_multicast_filtered(sk, skb, portid, group, flags, + NULL, NULL); +} + +/** * nlmsg_unicast - unicast a netlink message * @sk: netlink socket to spread message to * @skb: netlink message as socket buffer @@ -1189,7 +1237,7 @@ static inline void *nla_data(const struct nlattr *nla) * nla_len - length of payload * @nla: netlink attribute */ -static inline int nla_len(const struct nlattr *nla) +static inline u16 nla_len(const struct nlattr *nla) { return nla->nla_len - NLA_HDRLEN; } @@ -1358,6 +1406,22 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) } /** + * nla_put_uint - Add a variable-size unsigned int to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_uint(struct sk_buff *skb, int attrtype, u64 value) +{ + u64 tmp64 = value; + u32 tmp32 = value; + + if (tmp64 == tmp32) + return nla_put_u32(skb, attrtype, tmp32); + return nla_put(skb, attrtype, sizeof(u64), &tmp64); +} + +/** * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -1512,6 +1576,22 @@ static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, } /** + * nla_put_sint - Add a variable-size signed int to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_sint(struct sk_buff *skb, int attrtype, s64 value) +{ + s64 tmp64 = value; + s32 tmp32 = value; + + if (tmp64 == tmp32) + return nla_put_s32(skb, attrtype, tmp32); + return nla_put(skb, attrtype, sizeof(s64), &tmp64); +} + +/** * nla_put_string - Add a string netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -1668,6 +1748,17 @@ static inline u64 nla_get_u64(const struct nlattr *nla) } /** + * nla_get_uint - return payload of uint attribute + * @nla: uint netlink attribute + */ +static inline u64 nla_get_uint(const struct nlattr *nla) +{ + if (nla_len(nla) == sizeof(u32)) + return nla_get_u32(nla); + return nla_get_u64(nla); +} + +/** * nla_get_be64 - return payload of __be64 attribute * @nla: __be64 netlink attribute */ @@ -1730,6 +1821,17 @@ static inline s64 nla_get_s64(const struct nlattr *nla) } /** + * nla_get_sint - return payload of sint attribute + * @nla: sint netlink attribute + */ +static inline s64 nla_get_sint(const struct nlattr *nla) +{ + if (nla_len(nla) == sizeof(s32)) + return
nla_get_s32(nla); + return nla_get_s64(nla); +} + +/** * nla_get_flag - return payload of flag attribute * @nla: flag netlink attribute */ diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 1f463b3957c7..bae914815aa3 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -107,7 +107,7 @@ struct netns_ct { struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; struct nf_ip_net nf_ct_proto; #if defined(CONFIG_NF_CONNTRACK_LABELS) - unsigned int labels_used; + atomic_t labels_used; #endif }; #endif diff --git a/include/net/netns/core.h b/include/net/netns/core.h index a91ef9f8de60..78214f1b43a2 100644 --- a/include/net/netns/core.h +++ b/include/net/netns/core.h @@ -13,6 +13,7 @@ struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; + int sysctl_optmem_max; u8 sysctl_txrehash; #ifdef CONFIG_PROC_FS diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 7a41c4791536..c356c458b340 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -19,8 +19,7 @@ struct hlist_head; struct fib_table; struct sock; struct local_ports { - seqlock_t lock; - int range[2]; + u32 range; /* high << 16 | low */ bool warned; }; @@ -42,6 +41,38 @@ struct inet_timewait_death_row { struct tcp_fastopen_context; struct netns_ipv4 { + /* Cacheline organization can be found documented in + * Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst. + * Please update the document when adding new fields. + */ + + /* TX readonly hotpath cache lines */ + __cacheline_group_begin(netns_ipv4_read_tx); + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_tso_rtt_log; + u8 sysctl_tcp_autocorking; + int sysctl_tcp_min_snd_mss; + unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_min_rtt_wlen; + int sysctl_tcp_wmem[3]; + u8 sysctl_ip_fwd_use_pmtu; + __cacheline_group_end(netns_ipv4_read_tx); + + /* TXRX readonly hotpath cache lines */ + __cacheline_group_begin(netns_ipv4_read_txrx); + u8 sysctl_tcp_moderate_rcvbuf; + __cacheline_group_end(netns_ipv4_read_txrx); + + /* RX readonly hotpath cache line */ + __cacheline_group_begin(netns_ipv4_read_rx); + u8 sysctl_ip_early_demux; + u8 sysctl_tcp_early_demux; + int sysctl_tcp_reordering; + int sysctl_tcp_rmem[3]; + __cacheline_group_end(netns_ipv4_read_rx); + struct inet_timewait_death_row tcp_death_row; struct udp_table *udp_table; @@ -96,17 +127,14 @@ struct netns_ipv4 { u8 sysctl_ip_default_ttl; u8 sysctl_ip_no_pmtu_disc; - u8 sysctl_ip_fwd_use_pmtu; u8 sysctl_ip_fwd_update_priority; u8 sysctl_ip_nonlocal_bind; u8 sysctl_ip_autobind_reuse; /* Shall we try to damage output packets if routing dev changes? 
*/ u8 sysctl_ip_dynaddr; - u8 sysctl_ip_early_demux; #ifdef CONFIG_NET_L3_MASTER_DEV u8 sysctl_raw_l3mdev_accept; #endif - u8 sysctl_tcp_early_demux; u8 sysctl_udp_early_demux; u8 sysctl_nexthop_compat_mode; @@ -119,7 +147,6 @@ struct netns_ipv4 { u8 sysctl_tcp_mtu_probing; int sysctl_tcp_mtu_probe_floor; int sysctl_tcp_base_mss; - int sysctl_tcp_min_snd_mss; int sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; @@ -132,17 +159,17 @@ struct netns_ipv4 { u8 sysctl_tcp_syncookies; u8 sysctl_tcp_migrate_req; u8 sysctl_tcp_comp_sack_nr; - int sysctl_tcp_reordering; + u8 sysctl_tcp_backlog_ack_defer; + u8 sysctl_tcp_pingpong_thresh; + u8 sysctl_tcp_retries1; u8 sysctl_tcp_retries2; u8 sysctl_tcp_orphan_retries; u8 sysctl_tcp_tw_reuse; int sysctl_tcp_fin_timeout; - unsigned int sysctl_tcp_notsent_lowat; u8 sysctl_tcp_sack; u8 sysctl_tcp_window_scaling; u8 sysctl_tcp_timestamps; - u8 sysctl_tcp_early_retrans; u8 sysctl_tcp_recovery; u8 sysctl_tcp_thin_linear_timeouts; u8 sysctl_tcp_slow_start_after_idle; @@ -158,21 +185,13 @@ struct netns_ipv4 { u8 sysctl_tcp_frto; u8 sysctl_tcp_nometrics_save; u8 sysctl_tcp_no_ssthresh_metrics_save; - u8 sysctl_tcp_moderate_rcvbuf; - u8 sysctl_tcp_tso_win_divisor; u8 sysctl_tcp_workaround_signed_windows; - int sysctl_tcp_limit_output_bytes; int sysctl_tcp_challenge_ack_limit; - int sysctl_tcp_min_rtt_wlen; u8 sysctl_tcp_min_tso_segs; - u8 sysctl_tcp_tso_rtt_log; - u8 sysctl_tcp_autocorking; u8 sysctl_tcp_reflect_tos; int sysctl_tcp_invalid_ratelimit; int sysctl_tcp_pacing_ss_ratio; int sysctl_tcp_pacing_ca_ratio; - int sysctl_tcp_wmem[3]; - int sysctl_tcp_rmem[3]; unsigned int sysctl_tcp_child_ehash_entries; unsigned long sysctl_tcp_comp_sack_delay_ns; unsigned long sysctl_tcp_comp_sack_slack_ns; diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h index 582212ada3ba..fc752a50f91b 100644 --- a/include/net/netns/smc.h +++ b/include/net/netns/smc.h @@ -22,5 +22,7 @@ struct netns_smc { int sysctl_smcr_testlink_time; int sysctl_wmem; int sysctl_rmem; + int sysctl_max_links_per_lgr; + int sysctl_max_conns_per_lgr; }; #endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index bd7c3be4af5d..423b52eca908 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -50,6 +50,7 @@ struct netns_xfrm { struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; + unsigned int idx_generator; struct hlist_head policy_inexact[XFRM_POLICY_MAX]; struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX]; unsigned int policy_count[XFRM_POLICY_MAX * 2]; diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 2b12725de9c0..d92046a4a078 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -92,7 +92,7 @@ struct nh_res_table { u32 unbalanced_timer; u16 num_nh_buckets; - struct nh_res_bucket nh_buckets[]; + struct nh_res_bucket nh_buckets[] __counted_by(num_nh_buckets); }; struct nh_grp_entry { @@ -126,7 +126,7 @@ struct nh_group { bool has_v4; struct nh_res_table __rcu *res_table; - struct nh_grp_entry nh_entries[]; + struct nh_grp_entry nh_entries[] __counted_by(num_nh); }; struct nexthop { @@ -187,7 +187,7 @@ struct nh_notifier_grp_entry_info { struct nh_notifier_grp_info { u16 num_nh; bool is_fdb; - struct nh_notifier_grp_entry_info nh_entries[]; + struct nh_notifier_grp_entry_info nh_entries[] __counted_by(num_nh); }; struct nh_notifier_res_bucket_info { @@ -200,7 +200,7 @@ struct nh_notifier_res_bucket_info { struct nh_notifier_res_table_info { u16 num_nh_buckets; - struct 
nh_notifier_single_info nhs[]; + struct nh_notifier_single_info nhs[] __counted_by(num_nh_buckets); }; struct nh_notifier_info { diff --git a/include/net/nl802154.h b/include/net/nl802154.h index 8cd9d141f5af..4c752f799957 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -78,6 +78,10 @@ enum nl802154_commands { NL802154_CMD_SCAN_DONE, NL802154_CMD_SEND_BEACONS, NL802154_CMD_STOP_BEACONS, + NL802154_CMD_ASSOCIATE, + NL802154_CMD_DISASSOCIATE, + NL802154_CMD_SET_MAX_ASSOCIATIONS, + NL802154_CMD_LIST_ASSOCIATIONS, /* add new commands above here */ @@ -147,6 +151,8 @@ enum nl802154_attrs { NL802154_ATTR_SCAN_DURATION, NL802154_ATTR_SCAN_DONE_REASON, NL802154_ATTR_BEACON_INTERVAL, + NL802154_ATTR_MAX_ASSOCIATIONS, + NL802154_ATTR_PEER, /* add attributes here, update the policy in nl802154.c */ @@ -385,8 +391,6 @@ enum nl802154_supported_bool_states { NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1 }; -#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL - enum nl802154_dev_addr_modes { NL802154_DEV_ADDR_NONE, __NL802154_DEV_ADDR_INVALID, @@ -406,12 +410,26 @@ enum nl802154_dev_addr_attrs { NL802154_DEV_ADDR_ATTR_SHORT, NL802154_DEV_ADDR_ATTR_EXTENDED, NL802154_DEV_ADDR_ATTR_PAD, + NL802154_DEV_ADDR_ATTR_PEER_TYPE, /* keep last */ __NL802154_DEV_ADDR_ATTR_AFTER_LAST, NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1 }; +enum nl802154_peer_type { + NL802154_PEER_TYPE_UNSPEC, + + NL802154_PEER_TYPE_PARENT, + NL802154_PEER_TYPE_CHILD, + + /* keep last */ + __NL802154_PEER_TYPE_AFTER_LAST, + NL802154_PEER_TYPE_MAX = __NL802154_PEER_TYPE_AFTER_LAST - 1 +}; + +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + enum nl802154_key_id_modes { NL802154_KEY_ID_MODE_IMPLICIT, NL802154_KEY_ID_MODE_INDEX, diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h index 94231533a369..1d397c1a0043 100644 --- a/include/net/page_pool/helpers.h +++ b/include/net/page_pool/helpers.h @@ -8,23 +8,46 @@ /** * DOC: page_pool allocator * - * The page_pool allocator is optimized for the XDP mode that - * uses one frame per-page, but it can fallback on the - * regular page allocator APIs. - * - * Basic use involves replacing alloc_pages() calls with the - * page_pool_alloc_pages() call. Drivers should use - * page_pool_dev_alloc_pages() replacing dev_alloc_pages(). - * - * API keeps track of in-flight pages, in order to let API user know - * when it is safe to free a page_pool object. Thus, API users - * must call page_pool_put_page() to free the page, or attach - * the page to a page_pool-aware objects like skbs marked with + * The page_pool allocator is optimized for recycling pages or page fragments + * used by skb packets and xdp frames. + * + * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(), + * which allocates memory with or without page splitting depending on the + * requested memory size. + * + * If the driver knows that it always requires full pages or its allocations are + * always smaller than half a page, it can use one of the more specific API + * calls: + * + * 1. page_pool_alloc_pages(): allocate memory without page splitting when the + * driver knows that the memory it needs is always bigger than half of the page + * allocated from the page pool. There is no cache line dirtying for 'struct page' + * when a page is recycled back to the page pool. + * + * 2. page_pool_alloc_frag(): allocate memory with page splitting when the driver + * knows that the memory it needs is always smaller than or equal to half of the + * page allocated from the page pool. Page splitting enables memory saving and thus + * avoids TLB/cache misses for data access, but there is also some cost to + * implement page splitting, mainly some cache line dirtying/bouncing for + * 'struct page' and atomic operations for page->pp_ref_count. + * + * The API keeps track of in-flight pages, in order to let API users know when + * it is safe to free a page_pool object. Thus, API users must call + * page_pool_put_page() or page_pool_free_va() to free the page, or attach + * the page to a page_pool-aware object like skbs marked with * skb_mark_for_recycle(). * - * API user must call page_pool_put_page() once on a page, as it - * will either recycle the page, or in case of refcnt > 1, it will - * release the DMA mapping and in-flight state accounting. + * page_pool_put_page() may be called multiple times on the same page if a page + * is split into multiple fragments. For the last fragment, it will either + * recycle the page, or in case of page->_refcount > 1, it will release the DMA + * mapping and in-flight state accounting. + * + * dma_sync_single_range_for_device() is only called for the last fragment when + * page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag, so it depends on the + * last freed fragment to do the sync_for_device operation for all fragments in + * the same page when a page is split. The API user must set up pool->p.max_len + * and pool->p.offset correctly and ensure that page_pool_put_page() is called + * with dma_sync_size being -1 for the fragment API. */ #ifndef _NET_PAGE_POOL_HELPERS_H #define _NET_PAGE_POOL_HELPERS_H @@ -32,16 +55,12 @@ #include <net/page_pool/types.h> #ifdef CONFIG_PAGE_POOL_STATS +/* Deprecated driver-facing API, use netlink instead */ int page_pool_ethtool_stats_get_count(void); u8 *page_pool_ethtool_stats_get_strings(u8 *data); u64 *page_pool_ethtool_stats_get(u64 *data, void *stats); -/* - * Drivers that wish to harvest page pool stats and report them to users - * (perhaps via ethtool, debugfs, or another mechanism) can allocate a - * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool. - */ -bool page_pool_get_stats(struct page_pool *pool, +bool page_pool_get_stats(const struct page_pool *pool, struct page_pool_stats *stats); #else static inline int page_pool_ethtool_stats_get_count(void) @@ -73,6 +92,17 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) return page_pool_alloc_pages(pool, gfp); } +/** + * page_pool_dev_alloc_frag() - allocate a page fragment. + * @pool: pool from which to allocate + * @offset: offset to the allocated page + * @size: requested size + * + * Get a page fragment from the page allocator or page_pool caches. + * + * Return: + * Return allocated page fragment, otherwise return NULL.
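As a usage sketch of the fragment path documented above, a driver refilling an RX ring might take 2 KiB slices and remember the page/offset pair for the descriptor (struct my_rx_buf and the helper are hypothetical):

#include <linux/sizes.h>
#include <net/page_pool/helpers.h>

struct my_rx_buf {		/* invented per-descriptor bookkeeping */
	struct page *page;
	unsigned int offset;
};

static int my_refill_rx_slot(struct page_pool *pool, struct my_rx_buf *buf)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_dev_alloc_frag(pool, &offset, SZ_2K);
	if (!page)
		return -ENOMEM;

	buf->page = page;
	buf->offset = offset;
	return 0;
}

Each fragment is later released with page_pool_put_page() and a dma_sync_size of -1 so that, per the DOC text above, the last freed fragment performs the deferred sync_for_device for the whole page.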
+ */ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size) @@ -82,6 +112,91 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, return page_pool_alloc_frag(pool, offset, size, gfp); } +static inline struct page *page_pool_alloc(struct page_pool *pool, + unsigned int *offset, + unsigned int *size, gfp_t gfp) +{ + unsigned int max_size = PAGE_SIZE << pool->p.order; + struct page *page; + + if ((*size << 1) > max_size) { + *size = max_size; + *offset = 0; + return page_pool_alloc_pages(pool, gfp); + } + + page = page_pool_alloc_frag(pool, offset, *size, gfp); + if (unlikely(!page)) + return NULL; + + /* There is very likely not enough space for another fragment, so append + * the remaining size to the current fragment to avoid truesize + * underestimate problem. + */ + if (pool->frag_offset + *size > max_size) { + *size = max_size - *offset; + pool->frag_offset = max_size; + } + + return page; +} + +/** + * page_pool_dev_alloc() - allocate a page or a page fragment. + * @pool: pool from which to allocate + * @offset: offset to the allocated page + * @size: in as the requested size, out as the allocated size + * + * Get a page or a page fragment from the page allocator or page_pool caches + * depending on the requested size in order to allocate memory with least memory + * utilization and performance penalty. + * + * Return: + * Return allocated page or page fragment, otherwise return NULL. + */ +static inline struct page *page_pool_dev_alloc(struct page_pool *pool, + unsigned int *offset, + unsigned int *size) +{ + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + return page_pool_alloc(pool, offset, size, gfp); +} + +static inline void *page_pool_alloc_va(struct page_pool *pool, + unsigned int *size, gfp_t gfp) +{ + unsigned int offset; + struct page *page; + + /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */ + page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM); + if (unlikely(!page)) + return NULL; + + return page_address(page) + offset; +} + +/** + * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its + * va. + * @pool: pool from which to allocate + * @size: in as the requested size, out as the allocated size + * + * This is just a thin wrapper around the page_pool_alloc() API, and + * it returns va of the allocated page or page fragment. + * + * Return: + * Return the va for the allocated page or page fragment, otherwise return NULL. + */ +static inline void *page_pool_dev_alloc_va(struct page_pool *pool, + unsigned int *size) +{ + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + return page_pool_alloc_va(pool, size, gfp); +} + /** * page_pool_get_dma_dir() - Retrieve the stored DMA direction. * @pool: pool from which page was allocated @@ -95,48 +210,82 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) return pool->p.dma_dir; } -/* pp_frag_count represents the number of writers who can update the page - * either by updating skb->data or via DMA mappings for the device. - * We can't rely on the page refcnt for that as we don't know who might be - * holding page references and we can't reliably destroy or sync DMA mappings - * of the fragments. 
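A companion sketch for the new va-based helpers introduced above: page_pool_dev_alloc_va() hides the page/offset bookkeeping entirely, and page_pool_free_va() (further below) releases the buffer from just the kernel virtual address. Names are again illustrative:

#include <net/page_pool/helpers.h>

static void *my_alloc_hdr_buf(struct page_pool *pool, unsigned int *len)
{
	/* *len is in/out: the pool may grant more than was requested */
	return page_pool_dev_alloc_va(pool, len);
}

static void my_free_hdr_buf(struct page_pool *pool, void *va)
{
	/* allow_direct=true assumes we run in the pool's NAPI context */
	page_pool_free_va(pool, va, true);
}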
+/** + * page_pool_fragment_page() - split a fresh page into fragments + * @page: page to split + * @nr: references to set + * + * pp_ref_count represents the number of outstanding references to the page, + * which will be freed using page_pool APIs (rather than page allocator APIs + * like put_page()). Such references are usually held by page_pool-aware + * objects like skbs marked for page pool recycling. * - * When pp_frag_count reaches 0 we can either recycle the page if the page - * refcnt is 1 or return it back to the memory allocator and destroy any - * mappings we have. + * This helper allows the caller to take (set) multiple references to a + * freshly allocated page. The page must be freshly allocated (have a + * pp_ref_count of 1). This is commonly done by drivers and + * "fragment allocators" to save atomic operations - either when they know + * upfront how many references they will need; or to take MAX references and + * return the unused ones with a single atomic dec(), instead of performing + * multiple atomic inc() operations. */ static inline void page_pool_fragment_page(struct page *page, long nr) { - atomic_long_set(&page->pp_frag_count, nr); + atomic_long_set(&page->pp_ref_count, nr); } -static inline long page_pool_defrag_page(struct page *page, long nr) +static inline long page_pool_unref_page(struct page *page, long nr) { long ret; - /* If nr == pp_frag_count then we have cleared all remaining - * references to the page. No need to actually overwrite it, instead - * we can leave this to be overwritten by the calling function. + /* If nr == pp_ref_count then we have cleared all remaining + * references to the page: + * 1. 'nr == 1': no need to actually overwrite it. + * 2. 'nr != 1': overwrite it with one, which is the rare case + * for pp_ref_count draining. * - * The main advantage to doing this is that an atomic_read is - * generally a much cheaper operation than an atomic update, - * especially when dealing with a page that may be partitioned - * into only 2 or 3 pieces. + * The main advantage of doing this is that not only do we avoid an + * atomic update (an atomic_read is generally a much cheaper operation + * than an atomic update, especially when dealing with a page that may + * be referenced by only 2 or 3 users), but we also unify the + * pp_ref_count handling by ensuring all pages start out partitioned + * into only 1 piece, overwriting it only when the page is partitioned + * into more than one piece. */ - if (atomic_long_read(&page->pp_frag_count) == nr) + if (atomic_long_read(&page->pp_ref_count) == nr) { + /* As we have ensured nr is always one for the constant case + * using the BUILD_BUG_ON(), we only need to handle the + * non-constant case here for pp_ref_count draining, which is + * a rare case. + */ + BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1); + if (!__builtin_constant_p(nr)) + atomic_long_set(&page->pp_ref_count, 1); + return 0; + } - ret = atomic_long_sub_return(nr, &page->pp_frag_count); + ret = atomic_long_sub_return(nr, &page->pp_ref_count); WARN_ON(ret < 0); + + /* We are the last user here too; reset pp_ref_count back to 1 to + * ensure all pages have been partitioned into 1 piece initially. + * This should be the rare case where the last two fragment users call + * page_pool_unref_page() concurrently. + */ + if (unlikely(!ret)) + atomic_long_set(&page->pp_ref_count, 1); + return ret; } -static inline bool page_pool_is_last_frag(struct page_pool *pool, - struct page *page) +static inline void page_pool_ref_page(struct page *page) { - /* If fragments aren't enabled or count is 0 we were the last user */ - return !(pool->p.flags & PP_FLAG_PAGE_FRAG) || - (page_pool_defrag_page(page, 1) == 0); + atomic_long_inc(&page->pp_ref_count); +} + +static inline bool page_pool_is_last_ref(struct page *page) +{ + /* If page_pool_unref_page() returns 0, we were the last user */ + return page_pool_unref_page(page, 1) == 0; } /** @@ -161,10 +310,10 @@ static inline void page_pool_put_page(struct page_pool *pool, * allow registering MEM_TYPE_PAGE_POOL, but shield linker. */ #ifdef CONFIG_PAGE_POOL - if (!page_pool_is_last_frag(pool, page)) + if (!page_pool_is_last_ref(page)) return; - page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct); + page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct); #endif } @@ -197,10 +346,24 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, page_pool_put_full_page(pool, page, true); } -#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \ +#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \ (sizeof(dma_addr_t) > sizeof(unsigned long)) /** + * page_pool_free_va() - free a va into the page_pool + * @pool: pool from which va was allocated + * @va: va to be freed + * @allow_direct: freed by the consumer, allow lockless caching + * + * Free a va allocated from page_pool_alloc_va(). + */ +static inline void page_pool_free_va(struct page_pool *pool, void *va, + bool allow_direct) +{ + page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct); +} + +/** * page_pool_get_dma_addr() - Retrieve the stored DMA address. * @page: page allocated from a page pool * @@ -211,17 +374,25 @@ static inline dma_addr_t page_pool_get_dma_addr(struct page *page) { dma_addr_t ret = page->dma_addr; - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) - ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) + ret <<= PAGE_SHIFT; return ret; } -static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) +static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr) { + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) { + page->dma_addr = addr >> PAGE_SHIFT; + + /* We assume page alignment to shave off bottom bits; + * if this "compression" doesn't work we need to drop.
 static inline bool page_pool_put(struct page_pool *pool)
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 887e7946a597..76481c465375 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -5,6 +5,7 @@
 
 #include <linux/dma-direction.h>
 #include <linux/ptr_ring.h>
+#include <linux/types.h>
 
 #define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
@@ -17,10 +18,8 @@
					* Please note DMA-sync-for-CPU is still
					* device driver responsibility
					*/
-#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
 #define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
-				 PP_FLAG_DMA_SYNC_DEV |\
-				 PP_FLAG_PAGE_FRAG)
+				 PP_FLAG_DMA_SYNC_DEV)
 
 /*
  * Fast allocation side cache array/stack
@@ -45,29 +44,35 @@ struct pp_alloc_cache {
 /**
  * struct page_pool_params - page pool parameters
- * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
  * @order:	2^order pages on allocation
  * @pool_size:	size of the ptr_ring
  * @nid:	NUMA node id to allocate pages from
  * @dev:	device, for DMA pre-mapping purposes
+ * @netdev:	netdev this pool will serve (leave as NULL if none or multiple)
  * @napi:	NAPI which is the sole consumer of pages, otherwise NULL
  * @dma_dir:	DMA mapping direction
  * @max_len:	max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
  * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
  */
 struct page_pool_params {
-	unsigned int	flags;
-	unsigned int	order;
-	unsigned int	pool_size;
-	int		nid;
-	struct device	*dev;
-	struct napi_struct *napi;
-	enum dma_data_direction dma_dir;
-	unsigned int	max_len;
-	unsigned int	offset;
+	struct_group_tagged(page_pool_params_fast, fast,
+		unsigned int	flags;
+		unsigned int	order;
+		unsigned int	pool_size;
+		int		nid;
+		struct device	*dev;
+		struct napi_struct *napi;
+		enum dma_data_direction dma_dir;
+		unsigned int	max_len;
+		unsigned int	offset;
+	);
+	struct_group_tagged(page_pool_params_slow, slow,
+		struct net_device *netdev;
 /* private: used by test code only */
-	void (*init_callback)(struct page *page, void *arg);
-	void *init_arg;
+		void (*init_callback)(struct page *page, void *arg);
+		void *init_arg;
+	);
 };
 
 #ifdef CONFIG_PAGE_POOL_STATS
@@ -121,7 +126,9 @@ struct page_pool_stats {
 #endif
 
 struct page_pool {
-	struct page_pool_params p;
+	struct page_pool_params_fast p;
+
+	bool has_init_callback;
 
	long frag_users;
	struct page *frag_page;
@@ -180,6 +187,16 @@ struct page_pool {
	refcount_t user_cnt;
	u64 destroy_cnt;
+
+	/* Slow/Control-path information follows */
+	struct page_pool_params_slow slow;
+	/* User-facing fields, protected by page_pools_lock */
+	struct {
+		struct hlist_node list;
+		u64 detach_time;
+		u32 napi_id;
+		u32 id;
+	} user;
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -217,9 +234,9 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 }
 #endif
 
-void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
-				  unsigned int dma_sync_size,
-				  bool allow_direct);
+void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
+				unsigned int dma_sync_size,
+				bool allow_direct);
 
 static inline bool is_page_pool_compiled_in(void)
 {
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 15960564e0c3..1e200d9a066d 100644
---
a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -20,10 +20,10 @@ struct qdisc_walker { int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *); }; -static inline void *qdisc_priv(struct Qdisc *q) -{ - return &q->privdata; -} +#define qdisc_priv(q) \ + _Generic(q, \ + const struct Qdisc * : (const void *)&q->privdata, \ + struct Qdisc * : (void *)&q->privdata) static inline struct Qdisc *qdisc_from_priv(void *priv) { @@ -275,24 +275,6 @@ static inline void skb_txtime_consumed(struct sk_buff *skb) skb->tstamp = ktime_set(0, 0); } -struct tc_skb_cb { - struct qdisc_skb_cb qdisc_cb; - - u16 mru; - u8 post_ct:1; - u8 post_ct_snat:1; - u8 post_ct_dnat:1; - u16 zone; /* Only valid if post_ct = true */ -}; - -static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb) -{ - struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb; - - BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb)); - return cb; -} - static inline bool tc_qdisc_stats_dump(struct Qdisc *sch, unsigned long cl, struct qdisc_walker *arg) diff --git a/include/net/regulatory.h b/include/net/regulatory.h index b2cb4a9eb04d..ebf9e028d1ef 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -213,6 +213,7 @@ struct ieee80211_reg_rule { u32 flags; u32 dfs_cac_ms; bool has_wmm; + s8 psd; }; struct ieee80211_regdomain { diff --git a/include/net/route.h b/include/net/route.h index 51a45b1887b5..980ab474eabd 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -37,7 +37,7 @@ #define RTO_ONLINK 0x01 -#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE)) +#define RT_CONN_FLAGS(sk) (RT_TOS(READ_ONCE(inet_sk(sk)->tos)) | sock_flag(sk, SOCK_LOCALROUTE)) #define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE)) static inline __u8 ip_sock_rt_scope(const struct sock *sk) @@ -50,7 +50,7 @@ static inline __u8 ip_sock_rt_scope(const struct sock *sk) static inline __u8 ip_sock_rt_tos(const struct sock *sk) { - return RT_TOS(inet_sk(sk)->tos); + return RT_TOS(READ_ONCE(inet_sk(sk)->tos)); } struct ip_tunnel_info; @@ -136,12 +136,6 @@ static inline struct rtable *__ip_route_output_key(struct net *net, struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, const struct sock *sk); -struct rtable *ip_route_output_tunnel(struct sk_buff *skb, - struct net_device *dev, - struct net *net, __be32 *saddr, - const struct ip_tunnel_info *info, - u8 protocol, bool use_cache); - struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index f232512505f8..934fdb977551 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -19,6 +19,7 @@ #include <net/gen_stats.h> #include <net/rtnetlink.h> #include <net/flow_offload.h> +#include <linux/xarray.h> struct Qdisc_ops; struct qdisc_walker; @@ -324,7 +325,6 @@ struct Qdisc_ops { struct module *owner; }; - struct tcf_result { union { struct { @@ -332,7 +332,6 @@ struct tcf_result { u32 classid; }; const struct tcf_proto *goto_tp; - }; }; @@ -376,6 +375,10 @@ struct tcf_proto_ops { struct nlattr **tca, struct netlink_ext_ack *extack); void (*tmplt_destroy)(void *tmplt_priv); + void (*tmplt_reoffload)(struct tcf_chain *chain, + bool add, + flow_setup_cb_t *cb, + void *cb_priv); struct tcf_exts * (*get_exts)(const struct tcf_proto *tp, u32 handle); @@ -458,6 +461,7 @@ struct tcf_chain { }; struct tcf_block { + struct xarray ports; /* datapath accessible */ /* Lock protects tcf_block and 
lifetime-management data of chains * attached to the block (refcnt, action_refcnt, explicitly_created). */ @@ -484,6 +488,8 @@ struct tcf_block { struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */ }; +struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index); + static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain) { return lockdep_is_held(&chain->filter_chain_lock); @@ -587,6 +593,7 @@ static inline void sch_tree_unlock(struct Qdisc *q) extern struct Qdisc noop_qdisc; extern struct Qdisc_ops noop_qdisc_ops; extern struct Qdisc_ops pfifo_fast_ops; +extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1]; extern struct Qdisc_ops mq_qdisc_ops; extern struct Qdisc_ops noqueue_qdisc_ops; extern const struct Qdisc_ops *default_qdisc_ops; @@ -1037,6 +1044,37 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) return skb; } +struct tc_skb_cb { + struct qdisc_skb_cb qdisc_cb; + u32 drop_reason; + + u16 zone; /* Only valid if post_ct = true */ + u16 mru; + u8 post_ct:1; + u8 post_ct_snat:1; + u8 post_ct_dnat:1; +}; + +static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb) +{ + struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb; + + BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb)); + return cb; +} + +static inline enum skb_drop_reason +tcf_get_drop_reason(const struct sk_buff *skb) +{ + return tc_skb_cb(skb)->drop_reason; +} + +static inline void tcf_set_drop_reason(const struct sk_buff *skb, + enum skb_drop_reason reason) +{ + tc_skb_cb(skb)->drop_reason = reason; +} + /* Instead of calling kfree_skb() while root qdisc lock is held, * queue the skb for future freeing at end of __dev_xmit_skb() */ diff --git a/include/net/scm.h b/include/net/scm.h index c5bcdf65f55c..cf68acec4d70 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -5,10 +5,12 @@ #include <linux/limits.h> #include <linux/net.h> #include <linux/cred.h> +#include <linux/file.h> #include <linux/security.h> #include <linux/pid.h> #include <linux/nsproxy.h> #include <linux/sched/signal.h> +#include <net/compat.h> /* Well, we should have at least one descriptor open * to accept passed FDs 8) @@ -123,14 +125,17 @@ static inline bool scm_has_secdata(struct socket *sock) static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm) { struct file *pidfd_file = NULL; - int pidfd; + int len, pidfd; - /* - * put_cmsg() doesn't return an error if CMSG is truncated, + /* put_cmsg() doesn't return an error if CMSG is truncated, * that's why we need to opencode these checks here. 
*/ - if ((msg->msg_controllen <= sizeof(struct cmsghdr)) || - (msg->msg_controllen - sizeof(struct cmsghdr)) < sizeof(int)) { + if (msg->msg_flags & MSG_CMSG_COMPAT) + len = sizeof(struct compat_cmsghdr) + sizeof(int); + else + len = sizeof(struct cmsghdr) + sizeof(int); + + if (msg->msg_controllen < len) { msg->msg_flags |= MSG_CTRUNC; return; } @@ -204,5 +209,13 @@ static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg, scm_destroy_cred(scm); } +static inline int scm_recv_one_fd(struct file *f, int __user *ufd, + unsigned int flags) +{ + if (!ufd) + return -EFAULT; + return receive_fd(f, ufd, flags); +} + #endif /* __LINUX_NET_SCM_H */ diff --git a/include/net/smc.h b/include/net/smc.h index a002552be29c..c9dcb30e3fd9 100644 --- a/include/net/smc.h +++ b/include/net/smc.h @@ -52,9 +52,14 @@ struct smcd_dmb { struct smcd_dev; struct ism_client; +struct smcd_gid { + u64 gid; + u64 gid_ext; +}; + struct smcd_ops { - int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid, - u32 vid); + int (*query_remote_gid)(struct smcd_dev *dev, struct smcd_gid *rgid, + u32 vid_valid, u32 vid); int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb, struct ism_client *client); int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb); @@ -62,14 +67,13 @@ struct smcd_ops { int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id); int (*set_vlan_required)(struct smcd_dev *dev); int (*reset_vlan_required)(struct smcd_dev *dev); - int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq, - u32 event_code, u64 info); + int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid, + u32 trigger_irq, u32 event_code, u64 info); int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx, bool sf, unsigned int offset, void *data, unsigned int size); int (*supports_v2)(void); - u8* (*get_system_eid)(void); - u64 (*get_local_gid)(struct smcd_dev *dev); + void (*get_local_gid)(struct smcd_dev *dev, struct smcd_gid *gid); u16 (*get_chid)(struct smcd_dev *dev); struct device* (*get_dev)(struct smcd_dev *dev); }; diff --git a/include/net/sock.h b/include/net/sock.h index 11d503417591..54ca8dcbfb43 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -76,19 +76,6 @@ * the other protocols. */ -/* Define this to get the SOCK_DBG debugging facility. */ -#define SOCK_DEBUGGING -#ifdef SOCK_DEBUGGING -#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ - printk(KERN_DEBUG msg); } while (0) -#else -/* Validate arguments and do nothing */ -static inline __printf(2, 3) -void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) -{ -} -#endif - /* This is the per-socket lock. The spinlock provides a synchronization * between user contexts and software interrupt processing, whereas the * mini-semaphore synchronizes multiple users amongst themselves. @@ -277,8 +264,6 @@ struct sk_filter; * @sk_pacing_status: Pacing status (requested, handled by sch_fq) * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) * @sk_sndbuf: size of send buffer in bytes - * @__sk_flags_offset: empty field used to determine location of bitfield - * @sk_padding: unused element for alignment * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets * @sk_no_check_rx: allow zero checksum in RX packets * @sk_route_caps: route capabilities (e.g. 
%NETIF_F_TSO)
@@ -336,7 +321,7 @@ struct sk_filter;
  *	@sk_cgrp_data: cgroup data for this cgroup
  *	@sk_memcg: this socket's memory cgroup association
  *	@sk_write_pending: a write to stream socket waits to start
- *	@sk_wait_pending: number of threads blocked on this socket
+ *	@sk_disconnects: number of disconnect operations performed on this sock
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is buffer sending space available
  *
@@ -352,7 +337,6 @@ struct sk_filter;
  *	@sk_txtime_report_errors: set report errors mode for SO_TXTIME
  *	@sk_txtime_unused: unused txtime flags
  *	@ns_tracker: tracker for netns reference
- *	@sk_bind2_node: bind node in the bhash2 table
  */
 struct sock {
	/*
@@ -429,7 +413,7 @@ struct sock {
	unsigned int		sk_napi_id;
 #endif
	int			sk_rcvbuf;
-	int			sk_wait_pending;
+	int			sk_disconnects;
 
	struct sk_filter __rcu	*sk_filter;
	union {
@@ -544,7 +528,6 @@ struct sock {
 #endif
	struct rcu_head		sk_rcu;
	netns_tracker		ns_tracker;
-	struct hlist_node	sk_bind2_node;
 };
 
 enum sk_pacing {
@@ -873,16 +856,6 @@ static inline void sk_add_bind_node(struct sock *sk,
	hlist_add_head(&sk->sk_bind_node, list);
 }
 
-static inline void __sk_del_bind2_node(struct sock *sk)
-{
-	__hlist_del(&sk->sk_bind2_node);
-}
-
-static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
-{
-	hlist_add_head(&sk->sk_bind2_node, list);
-}
-
 #define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
 #define sk_for_each_rcu(__sk, list) \
@@ -900,8 +873,6 @@ static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
 #define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)
-#define sk_for_each_bound_bhash2(__sk, list) \
-	hlist_for_each_entry(__sk, list, sk_bind2_node)
 
 /**
  * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -1053,6 +1024,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
 }
 
+static inline void sk_forward_alloc_add(struct sock *sk, int val)
+{
+	/* Paired with lockless reads of sk->sk_forward_alloc */
+	WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
+}
+
 void sk_stream_write_space(struct sock *sk);
 
 /* OOB backlog add */
@@ -1183,8 +1160,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
 }
 
 #define sk_wait_event(__sk, __timeo, __condition, __wait)		\
-	({	int __rc;						\
-		__sk->sk_wait_pending++;				\
+	({	int __rc, __dis = __sk->sk_disconnects;			\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
-		__sk->sk_wait_pending--;				\
-		__rc = __condition;					\
+		__rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
		__rc;							\
	})
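A standalone pthread model (not kernel code; model_sock and model_wait_event() are illustrative and the timeout is omitted) of the new sk_wait_event() logic: snapshot sk_disconnects before sleeping, and treat any change observed after re-locking as -EPIPE, so a waiter cannot miss a disconnect that happened while the socket lock was dropped:

#include <errno.h>
#include <pthread.h>

struct model_sock {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	int disconnects;	/* bumped by every disconnect operation */
	int condition;		/* what the waiter is waiting for */
};

/* Caller holds sk->lock, mirroring the socket lock in the macro. */
static int model_wait_event(struct model_sock *sk)
{
	int dis = sk->disconnects;	/* snapshot under the lock */

	while (!sk->condition && sk->disconnects == dis)
		pthread_cond_wait(&sk->wake, &sk->lock); /* drops + retakes lock */

	/* A disconnect raced with our sleep: report it instead of success. */
	return (dis == sk->disconnects) ? sk->condition : -EPIPE;
}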
@@ -1377,7 +1352,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
	if (sk->sk_prot->forward_alloc_get)
		return sk->sk_prot->forward_alloc_get(sk);
 #endif
-	return sk->sk_forward_alloc;
+	return READ_ONCE(sk->sk_forward_alloc);
 }
 
 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
@@ -1673,14 +1648,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
 {
	if (!sk_has_account(sk))
		return;
-	sk->sk_forward_alloc -= size;
+	sk_forward_alloc_add(sk, -size);
 }
 
 static inline void sk_mem_uncharge(struct sock *sk, int size)
 {
	if (!sk_has_account(sk))
		return;
-	sk->sk_forward_alloc += size;
+	sk_forward_alloc_add(sk, size);
	sk_mem_reclaim(sk);
 }
 
@@ -1817,12 +1792,11 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
 
 static inline void sock_release_ownership(struct sock *sk)
 {
-	if (sock_owned_by_user_nocheck(sk)) {
-		sk->sk_lock.owned = 0;
+	DEBUG_NET_WARN_ON_ONCE(!sock_owned_by_user_nocheck(sk));
+	sk->sk_lock.owned = 0;
 
-		/* The sk_lock has mutex_unlock() semantics: */
-		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
-	}
+	/* The sk_lock has mutex_unlock() semantics: */
+	mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
 }
 
 /* no reclassification while locks are held */
@@ -1861,11 +1835,13 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
		  sockptr_t optval, unsigned int optlen);
 int sock_setsockopt(struct socket *sock, int level, int op,
		    sockptr_t optval, unsigned int optlen);
+int do_sock_setsockopt(struct socket *sock, bool compat, int level,
+		       int optname, sockptr_t optval, int optlen);
+int do_sock_getsockopt(struct socket *sock, bool compat, int level,
+		       int optname, sockptr_t optval, sockptr_t optlen);
 int sk_getsockopt(struct sock *sk, int level, int optname,
		  sockptr_t optval, sockptr_t optlen);
-int sock_getsockopt(struct socket *sock, int level, int op,
-		    char __user *optval, int __user *optlen);
 int sock_gettstamp(struct socket *sock, void __user *userstamp,
		   bool timeval, bool time32);
 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
@@ -1900,7 +1876,9 @@ struct sockcm_cookie {
 static inline void sockcm_init(struct sockcm_cookie *sockc,
			       const struct sock *sk)
 {
-	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+	*sockc = (struct sockcm_cookie) {
+		.tsflags = READ_ONCE(sk->sk_tsflags)
+	};
 }
 
 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
@@ -2000,21 +1978,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
	/* sk_tx_queue_mapping accepts only up to a 16-bit value */
	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
		return;
-	sk->sk_tx_queue_mapping = tx_queue;
+	/* Paired with READ_ONCE() in sk_tx_queue_get() and
+	 * other WRITE_ONCE() because the socket lock might not be held.
+	 */
+	WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
 }
 
 #define NO_QUEUE_MAPPING	USHRT_MAX
 
 static inline void sk_tx_queue_clear(struct sock *sk)
 {
-	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
+	/* Paired with READ_ONCE() in sk_tx_queue_get() and
+	 * other WRITE_ONCE() because the socket lock might not be held.
+	 */
+	WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
 }
 
 static inline int sk_tx_queue_get(const struct sock *sk)
 {
-	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
-		return sk->sk_tx_queue_mapping;
+	if (sk) {
+		/* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+		 * and sk_tx_queue_set().
+ */ + int val = READ_ONCE(sk->sk_tx_queue_mapping); + if (val != NO_QUEUE_MAPPING) + return val; + } return -1; } @@ -2134,14 +2124,14 @@ static inline bool sk_rethink_txhash(struct sock *sk) } static inline struct dst_entry * -__sk_dst_get(struct sock *sk) +__sk_dst_get(const struct sock *sk) { return rcu_dereference_check(sk->sk_dst_cache, lockdep_sock_is_held(sk)); } static inline struct dst_entry * -sk_dst_get(struct sock *sk) +sk_dst_get(const struct sock *sk) { struct dst_entry *dst; @@ -2163,7 +2153,7 @@ static inline void __dst_negative_advice(struct sock *sk) if (ndst != dst) { rcu_assign_pointer(sk->sk_dst_cache, ndst); sk_tx_queue_clear(sk); - sk->sk_dst_pending_confirm = 0; + WRITE_ONCE(sk->sk_dst_pending_confirm, 0); } } } @@ -2180,7 +2170,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old_dst; sk_tx_queue_clear(sk); - sk->sk_dst_pending_confirm = 0; + WRITE_ONCE(sk->sk_dst_pending_confirm, 0); old_dst = rcu_dereference_protected(sk->sk_dst_cache, lockdep_sock_is_held(sk)); rcu_assign_pointer(sk->sk_dst_cache, dst); @@ -2193,7 +2183,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old_dst; sk_tx_queue_clear(sk); - sk->sk_dst_pending_confirm = 0; + WRITE_ONCE(sk->sk_dst_pending_confirm, 0); old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); dst_release(old_dst); } @@ -2231,7 +2221,7 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n) } } -bool sk_mc_loop(struct sock *sk); +bool sk_mc_loop(const struct sock *sk); static inline bool sk_can_gso(const struct sock *sk) { @@ -2695,9 +2685,9 @@ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, static inline void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { - ktime_t kt = skb->tstamp; struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); - + u32 tsflags = READ_ONCE(sk->sk_tsflags); + ktime_t kt = skb->tstamp; /* * generate control messages if * - receive time stamping in software requested @@ -2705,10 +2695,10 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) * - hardware time stamps available and wanted */ if (sock_flag(sk, SOCK_RCVTSTAMP) || - (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || - (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || + (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || + (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) || (hwtstamps->hwtstamp && - (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) + (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) __sock_recv_timestamp(msg, sk, skb); else sock_write_timestamp(sk, kt); @@ -2730,7 +2720,8 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, #define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \ SOF_TIMESTAMPING_RAW_HARDWARE) - if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY) + if (sk->sk_flags & FLAGS_RECV_CMSGS || + READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY) __sock_recv_cmsgs(msg, sk, skb); else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) sock_write_timestamp(sk, skb->tstamp); @@ -2774,9 +2765,30 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) &skb_shinfo(skb)->tskey); } +static inline bool sk_is_inet(const struct sock *sk) +{ + int family = READ_ONCE(sk->sk_family); + + return family == AF_INET || family == AF_INET6; +} + static inline bool sk_is_tcp(const struct sock *sk) { - return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; + return sk_is_inet(sk) && + sk->sk_type == SOCK_STREAM && + 
sk->sk_protocol == IPPROTO_TCP; +} + +static inline bool sk_is_udp(const struct sock *sk) +{ + return sk_is_inet(sk) && + sk->sk_type == SOCK_DGRAM && + sk->sk_protocol == IPPROTO_UDP; +} + +static inline bool sk_is_stream_unix(const struct sock *sk) +{ + return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM; } /** @@ -2900,7 +2912,6 @@ extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; extern int sysctl_tstamp_allow_data; -extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h index b24ea2d9400b..77f87c622a2e 100644 --- a/include/net/tc_act/tc_ct.h +++ b/include/net/tc_act/tc_ct.h @@ -22,6 +22,7 @@ struct tcf_ct_params { struct nf_nat_range2 range; bool ipv4_range; + bool put_labels; u16 ct_action; @@ -57,6 +58,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a) return to_ct_params(a)->nf_ft; } +static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a) +{ + return to_ct_params(a)->helper; +} + #else static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; } static inline int tcf_ct_action(const struct tc_action *a) { return 0; } @@ -64,6 +70,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a) { return NULL; } +static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a) +{ + return NULL; +} #endif /* CONFIG_NF_CONNTRACK */ #if IS_ENABLED(CONFIG_NET_ACT_CT) diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h deleted file mode 100644 index 4225fcb1c6ba..000000000000 --- a/include/net/tc_act/tc_ipt.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __NET_TC_IPT_H -#define __NET_TC_IPT_H - -#include <net/act_api.h> - -struct xt_entry_target; - -struct tcf_ipt { - struct tc_action common; - u32 tcfi_hook; - char *tcfi_tname; - struct xt_entry_target *tcfi_t; -}; -#define to_ipt(a) ((struct tcf_ipt *)a) - -#endif /* __NET_TC_IPT_H */ diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index 32ce8ea36950..75722d967bf2 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h @@ -8,6 +8,7 @@ struct tcf_mirred { struct tc_action common; int tcfm_eaction; + u32 tcfm_blockid; bool tcfm_mac_header_xmit; struct net_device __rcu *tcfm_dev; netdevice_tracker tcfm_dev_tracker; diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h index a6d481b5bcbc..a608546bcefc 100644 --- a/include/net/tc_wrapper.h +++ b/include/net/tc_wrapper.h @@ -117,10 +117,6 @@ static inline int tc_act(struct sk_buff *skb, const struct tc_action *a, if (a->ops->act == tcf_ife_act) return tcf_ife_act(skb, a, res); #endif -#if IS_BUILTIN(CONFIG_NET_ACT_IPT) - if (a->ops->act == tcf_ipt_act) - return tcf_ipt_act(skb, a, res); -#endif #if IS_BUILTIN(CONFIG_NET_ACT_SIMP) if (a->ops->act == tcf_simp_act) return tcf_simp_act(skb, a, res); diff --git a/include/net/tcp.h b/include/net/tcp.h index 91688d0dadcd..dd78a1181031 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -37,6 +37,7 @@ #include <net/snmp.h> #include <net/ip.h> #include <net/tcp_states.h> +#include <net/tcp_ao.h> #include <net/inet_ecn.h> #include <net/dst.h> #include <net/mptcp.h> @@ -131,6 +132,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */ #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending 
an ACK	*/
+static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
+
 #if HZ >= 100
 #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
 #define TCP_ATO_MIN	((unsigned)(HZ/25))
@@ -141,6 +144,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCP_RTO_MAX	((unsigned)(120*HZ))
 #define TCP_RTO_MIN	((unsigned)(HZ/5))
 #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
+
+#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
+
 #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
@@ -161,7 +167,12 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define MAX_TCP_KEEPCNT		127
 #define MAX_TCP_SYNCNT		127
 
-#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
+/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
+ * to avoid overflows. This assumes a clock smaller than 1 MHz.
+ * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
+ */
+#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
+
 #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
@@ -184,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCPOPT_SACK		5	/* SACK Block */
 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
+#define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
 #define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
 #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 #define TCPOPT_EXP		254	/* Experimental */
@@ -348,12 +360,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);
 
-static inline void tcp_dec_quickack_mode(struct sock *sk,
-					 const unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk)
 {
	struct inet_connection_sock *icsk = inet_csk(sk);
 
	if (icsk->icsk_ack.quick) {
+		/* How many ACKs S/ACKing new data have we sent? */
+		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
+
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
@@ -424,7 +438,6 @@ int tcp_mmap(struct file *file, struct socket *sock,
 void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
-const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 /*
  *	BPF SKB-less helpers
@@ -477,13 +490,14 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 /* From syncookies.c */
 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
-				 struct dst_entry *dst, u32 tsoff);
-int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
-		      u32 cookie);
+				 struct dst_entry *dst);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
-					    const struct tcp_request_sock_ops *af_ops,
-					    struct sock *sk, struct sk_buff *skb);
+					    struct sock *sk, struct sk_buff *skb,
+					    struct tcp_options_received *tcp_opt,
+					    int mss, u32 tsoff);
+
 #ifdef CONFIG_SYN_COOKIES
 
 /* Syncookies use a monotonic timer which increments every 60 seconds.
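A quick standalone check (not kernel code) of the new constant's arithmetic: with usec timestamps ticking at 1 MHz, a signed 32-bit timestamp difference wraps after INT_MAX microseconds, and TCP_PAWS_WRAP is the largest whole number of seconds that still fits inside that budget:

#include <assert.h>
#include <limits.h>

#define USEC_PER_SEC	1000000L
#define TCP_PAWS_WRAP	(INT_MAX / USEC_PER_SEC)	/* = 2147 seconds */

int main(void)
{
	assert(TCP_PAWS_WRAP == 2147);
	/* At the old default 1 kHz clock, the same INT_MAX budget spans
	 * roughly 24 days, which is where TCP_PAWS_24DAYS came from. */
	assert(INT_MAX / 1000L / 86400L == 24);
	return 0;
}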
@@ -569,12 +583,15 @@ __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 u64 cookie_init_timestamp(struct request_sock *req, u64 now);
 bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
-bool cookie_ecn_ok(const struct tcp_options_received *opt,
-		   const struct net *net, const struct dst_entry *dst);
+
+static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
+{
+	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
+		dst_feature(dst, RTAX_FEATURE_ECN);
+}
 
 /* From net/ipv6/syncookies.c */
-int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
-		      u32 cookie);
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 
 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
@@ -718,8 +735,10 @@ static inline void tcp_fast_path_check(struct sock *sk)
		tcp_fast_path_on(tp);
 }
 
+u32 tcp_delack_max(const struct sock *sk);
+
 /* Compute the actual rto_min value */
-static inline u32 tcp_rto_min(struct sock *sk)
+static inline u32 tcp_rto_min(const struct sock *sk)
 {
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;
@@ -729,7 +748,7 @@ static inline u32 tcp_rto_min(struct sock *sk)
	return rto_min;
 }
 
-static inline u32 tcp_rto_min_us(struct sock *sk)
+static inline u32 tcp_rto_min_us(const struct sock *sk)
 {
	return jiffies_to_usecs(tcp_rto_min(sk));
 }
@@ -789,22 +808,31 @@ static inline u64 tcp_clock_us(void)
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
 }
 
-/* This should only be used in contexts where tp->tcp_mstamp is up to date */
-static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+static inline u64 tcp_clock_ms(void)
 {
-	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
 }
 
-/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
-static inline u32 tcp_ns_to_ts(u64 ns)
+/* TCP Timestamp included in TS option (RFC 1323) can either use ms
+ * or usec resolution. Each socket carries a flag to select one or the
+ * other resolution, as the route attribute could change anytime.
+ * Each flow must stick to its initial resolution.
+ */
+static inline u32 tcp_clock_ts(bool usec_ts)
 {
-	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+	return usec_ts ?
tcp_clock_us() : tcp_clock_ms(); } -/* Could use tcp_clock_us() / 1000, but this version uses a single divide */ -static inline u32 tcp_time_stamp_raw(void) +static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp) { - return tcp_ns_to_ts(tcp_clock_ns()); + return div_u64(tp->tcp_mstamp, USEC_PER_MSEC); +} + +static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp) +{ + if (tp->tcp_usec_ts) + return tp->tcp_mstamp; + return tcp_time_stamp_ms(tp); } void tcp_mstamp_refresh(struct tcp_sock *tp); @@ -814,17 +842,30 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) return max_t(s64, t1 - t0, 0); } -static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) -{ - return tcp_ns_to_ts(skb->skb_mstamp_ns); -} - /* provide the departure time in us unit */ static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb) { return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC); } +/* Provide skb TSval in usec or ms unit */ +static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb) +{ + if (usec_ts) + return tcp_skb_timestamp_us(skb); + + return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC); +} + +static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw) +{ + return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset; +} + +static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq) +{ + return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off; +} #define tcp_flag_byte(th) (((u_int8_t *)th)[13]) @@ -1453,13 +1494,15 @@ static inline int tcp_space_from_win(const struct sock *sk, int win) return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win); } +/* Assume a conservative default of 1200 bytes of payload per 4K page. + * This may be adjusted later in tcp_measure_rcv_mss(). + */ +#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \ + SKB_TRUESIZE(4096)) + static inline void tcp_scaling_ratio_init(struct sock *sk) { - /* Assume a conservative default of 1200 bytes of payload per 4K page. - * This may be adjusted later in tcp_measure_rcv_mss(). 
- */
-	tcp_sk(sk)->scaling_ratio = (1200 << TCP_RMEM_TO_WIN_SCALE) /
-				    SKB_TRUESIZE(4096);
+	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
 }
 
 /* Note: caller must be prepared to deal with negative returns */
@@ -1475,17 +1518,22 @@ static inline int tcp_full_space(const struct sock *sk)
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
-static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
 {
	int unused_mem = sk_unused_reserved_mem(sk);
	struct tcp_sock *tp = tcp_sk(sk);
 
-	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
	if (unused_mem)
		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
					 tcp_win_from_space(sk, unused_mem));
 }
 
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
+}
+
 void tcp_cleanup_rbuf(struct sock *sk, int copied);
 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
 
@@ -1590,7 +1638,7 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
-				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
+				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1650,12 +1698,7 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
	tp->retransmit_skb_hint = NULL;
 }
 
-union tcp_md5_addr {
-	struct in_addr  a4;
-#if IS_ENABLED(CONFIG_IPV6)
-	struct in6_addr	a6;
-#endif
-};
+#define tcp_md5_addr tcp_ao_addr
 
 /* - key database */
 struct tcp_md5sig_key {
@@ -1699,12 +1742,39 @@ union tcp_md5sum_block {
 #endif
 };
 
-/* - pool: digest algorithm, hash description and scratch buffer */
-struct tcp_md5sig_pool {
-	struct ahash_request	*md5_req;
-	void			*scratch;
+/*
+ * struct tcp_sigpool - per-CPU pool of ahash_requests
+ * @scratch: per-CPU temporary area that can be used between
+ *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
+ *	     a crypto request
+ * @req: pre-allocated ahash request
+ */
+struct tcp_sigpool {
+	void *scratch;
+	struct ahash_request *req;
 };
 
+int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
+void tcp_sigpool_get(unsigned int id);
+void tcp_sigpool_release(unsigned int id);
+int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
+			      const struct sk_buff *skb,
+			      unsigned int header_len);
+
+/**
+ * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
+ * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
+ * @c: returned tcp_sigpool for usage (uninitialized on failure)
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
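A hypothetical caller, sketched only from the declarations above (the error handling, the "md5" algorithm string and example_hash_skb() itself are illustrative), showing the intended lifecycle of the per-CPU pool:

static int example_hash_skb(const struct sk_buff *skb, unsigned int hdr_len)
{
	struct tcp_sigpool hp;
	int id, err;

	id = tcp_sigpool_alloc_ahash("md5", 0);	/* once, at setup time */
	if (id < 0)
		return id;

	err = tcp_sigpool_start(id, &hp);	/* disables bh on success */
	if (!err) {
		err = tcp_sigpool_hash_skb_data(&hp, skb, hdr_len);
		tcp_sigpool_end(&hp);		/* re-enables bh */
	}

	tcp_sigpool_release(id);		/* drop the pool reference */
	return err;
}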
+/**
+ * tcp_sigpool_end - enable bh and stop using tcp_sigpool
+ * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
+ */
+void tcp_sigpool_end(struct tcp_sigpool *c);
+size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
+
 /* - functions */
 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
@@ -1717,28 +1787,36 @@ int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
 
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags);
+void tcp_clear_md5_list(struct sock *sk);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);
 
 #ifdef CONFIG_TCP_MD5SIG
-#include <linux/jump_label.h>
-extern struct static_key_false_deferred tcp_md5_needed;
 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
-					   int family);
+					   int family, bool any_l3index);
 static inline struct tcp_md5sig_key *
 tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
 {
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
-	return __tcp_md5_do_lookup(sk, l3index, addr, family);
+	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
+}
+
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup_any_l3index(const struct sock *sk,
+			      const union tcp_md5_addr *addr, int family)
+{
+	if (!static_branch_unlikely(&tcp_md5_needed.key))
+		return NULL;
+	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
 }
 
 enum skb_drop_reason
 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
-		     int family, int dif, int sdif);
+		     int family, int l3index, const __u8 *hash_location);
 
 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
@@ -1750,27 +1828,29 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index,
	return NULL;
 }
 
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup_any_l3index(const struct sock *sk,
+			      const union tcp_md5_addr *addr, int family)
+{
+	return NULL;
+}
+
 static inline enum skb_drop_reason
 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
-		     int family, int dif, int sdif)
+		     int family, int l3index, const __u8 *hash_location)
 {
	return SKB_NOT_DROPPED_YET;
 }
 #define tcp_twsk_md5_key(twsk)	NULL
 #endif
 
-bool tcp_alloc_md5sig_pool(void);
+int tcp_md5_alloc_sigpool(void);
+void tcp_md5_release_sigpool(void);
+void tcp_md5_add_sigpool(void);
+extern int tcp_md5_sigpool_id;
 
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
-static inline void tcp_put_md5sig_pool(void)
-{
-	local_bh_enable();
-}
-
-int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
-			  unsigned int header_len);
-int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+int tcp_md5_hash_key(struct tcp_sigpool *hp,
		     const struct tcp_md5sig_key *key);
 
 /* From tcp_fastopen.c */
@@ -2075,7 +2155,11 @@ INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
 INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
+#ifdef CONFIG_INET
 void tcp_gro_complete(struct sk_buff *skb);
+#else
+static
inline void tcp_gro_complete(struct sk_buff *skb) { } +#endif void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); @@ -2115,6 +2199,18 @@ struct tcp_sock_af_ops { sockptr_t optval, int optlen); #endif +#ifdef CONFIG_TCP_AO + int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen); + struct tcp_ao_key *(*ao_lookup)(const struct sock *sk, + struct sock *addr_sk, + int sndid, int rcvid); + int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key, + const struct sock *sk, + __be32 sisn, __be32 disn, bool send); + int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao, + const struct sock *sk, const struct sk_buff *skb, + const u8 *tkey, int hash_offset, u32 sne); +#endif }; struct tcp_request_sock_ops { @@ -2127,6 +2223,15 @@ struct tcp_request_sock_ops { const struct sock *sk, const struct sk_buff *skb); #endif +#ifdef CONFIG_TCP_AO + struct tcp_ao_key *(*ao_lookup)(const struct sock *sk, + struct request_sock *req, + int sndid, int rcvid); + int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk); + int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt, + struct request_sock *req, const struct sk_buff *skb, + int hash_offset, u32 sne); +#endif #ifdef CONFIG_SYN_COOKIES __u32 (*cookie_init_seq)(const struct sk_buff *skb, __u16 *mss); @@ -2167,6 +2272,76 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, } #endif +struct tcp_key { + union { + struct { + struct tcp_ao_key *ao_key; + char *traffic_key; + u32 sne; + u8 rcv_next; + }; + struct tcp_md5sig_key *md5_key; + }; + enum { + TCP_KEY_NONE = 0, + TCP_KEY_MD5, + TCP_KEY_AO, + } type; +}; + +static inline void tcp_get_current_key(const struct sock *sk, + struct tcp_key *out) +{ +#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG) + const struct tcp_sock *tp = tcp_sk(sk); +#endif + +#ifdef CONFIG_TCP_AO + if (static_branch_unlikely(&tcp_ao_needed.key)) { + struct tcp_ao_info *ao; + + ao = rcu_dereference_protected(tp->ao_info, + lockdep_sock_is_held(sk)); + if (ao) { + out->ao_key = READ_ONCE(ao->current_key); + out->type = TCP_KEY_AO; + return; + } + } +#endif +#ifdef CONFIG_TCP_MD5SIG + if (static_branch_unlikely(&tcp_md5_needed.key) && + rcu_access_pointer(tp->md5sig_info)) { + out->md5_key = tp->af_specific->md5_lookup(sk, sk); + if (out->md5_key) { + out->type = TCP_KEY_MD5; + return; + } + } +#endif + out->type = TCP_KEY_NONE; +} + +static inline bool tcp_key_is_md5(const struct tcp_key *key) +{ +#ifdef CONFIG_TCP_MD5SIG + if (static_branch_unlikely(&tcp_md5_needed.key) && + key->type == TCP_KEY_MD5) + return true; +#endif + return false; +} + +static inline bool tcp_key_is_ao(const struct tcp_key *key) +{ +#ifdef CONFIG_TCP_AO + if (static_branch_unlikely(&tcp_ao_needed.key) && + key->type == TCP_KEY_AO) + return true; +#endif + return false; +} + int tcpv4_offload_init(void); void tcp_v4_init(void); @@ -2525,4 +2700,116 @@ static inline u64 tcp_transmit_time(const struct sock *sk) return 0; } +static inline int tcp_parse_auth_options(const struct tcphdr *th, + const u8 **md5_hash, const struct tcp_ao_hdr **aoh) +{ + const u8 *md5_tmp, *ao_tmp; + int ret; + + ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp); + if (ret) + return ret; + + if (md5_hash) + *md5_hash = md5_tmp; + + if (aoh) { + if (!ao_tmp) + *aoh = NULL; + else + *aoh = (struct tcp_ao_hdr *)(ao_tmp - 2); + } + + return 0; +} + +static inline bool tcp_ao_required(struct sock *sk, const void *saddr, + int family, int l3index, bool stat_inc) +{ +#ifdef CONFIG_TCP_AO 
+	struct tcp_ao_info *ao_info;
+	struct tcp_ao_key *ao_key;
+
+	if (!static_branch_unlikely(&tcp_ao_needed.key))
+		return false;
+
+	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
+					lockdep_sock_is_held(sk));
+	if (!ao_info)
+		return false;
+
+	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
+	if (ao_info->ao_required || ao_key) {
+		if (stat_inc) {
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
+			atomic64_inc(&ao_info->counters.ao_required);
+		}
+		return true;
+	}
+#endif
+	return false;
+}
+
+/* Called with rcu_read_lock() */
+static inline enum skb_drop_reason
+tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
+		 const struct sk_buff *skb,
+		 const void *saddr, const void *daddr,
+		 int family, int dif, int sdif)
+{
+	const struct tcphdr *th = tcp_hdr(skb);
+	const struct tcp_ao_hdr *aoh;
+	const __u8 *md5_location;
+	int l3index;
+
+	/* Invalid option, or any of the auth options present twice */
+	if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
+		tcp_hash_fail("TCP segment has incorrect auth options set",
+			      family, skb, "");
+		return SKB_DROP_REASON_TCP_AUTH_HDR;
+	}
+
+	if (req) {
+		if (tcp_rsk_used_ao(req) != !!aoh) {
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
+			tcp_hash_fail("TCP connection can't start/end using TCP-AO",
+				      family, skb, "%s",
+				      !aoh ? "missing AO" : "AO signed");
+			return SKB_DROP_REASON_TCP_AOFAILURE;
+		}
+	}
+
+	/* sdif set, means packet ingressed via a device
+	 * in an L3 domain and dif is set to the l3mdev
+	 */
+	l3index = sdif ? dif : 0;
+
+	/* Fast path: unsigned segments */
+	if (likely(!md5_location && !aoh)) {
+		/* Drop if there's a TCP-MD5 or TCP-AO key with any
+		 * rcvid/sndid for the remote peer. On a TCP-AO established
+		 * connection the last key is impossible to remove, so
+		 * there's always at least one current_key.
+		 */
+		if (tcp_ao_required(sk, saddr, family, l3index, true)) {
+			tcp_hash_fail("AO hash is required, but not found",
+				      family, skb, "L3 index %d", l3index);
+			return SKB_DROP_REASON_TCP_AONOTFOUND;
+		}
+		if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+			tcp_hash_fail("MD5 Hash not found",
+				      family, skb, "L3 index %d", l3index);
+			return SKB_DROP_REASON_TCP_MD5NOTFOUND;
+		}
+		return SKB_NOT_DROPPED_YET;
+	}
+
+	if (aoh)
+		return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
+
+	return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
+				    l3index, md5_location);
+}
+
 #endif	/* _TCP_H */
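A simplified standalone decision-table model (not kernel code; classify() folds MD5/AO key presence into one key_configured flag) of the tcp_inbound_hash() dispatch above, following the order the function checks things in:

#include <stdbool.h>
#include <stdio.h>

enum verdict { ACCEPT, DISPATCH_AO, DISPATCH_MD5, DROP_UNSIGNED };

static enum verdict classify(bool has_md5, bool has_ao, bool key_configured)
{
	if (!has_md5 && !has_ao)
		/* unsigned fast path: legal only with no key configured */
		return key_configured ? DROP_UNSIGNED : ACCEPT;
	if (has_ao)
		return DISPATCH_AO;	/* aoh is checked before md5_location */
	return DISPATCH_MD5;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       classify(false, false, false),	/* ACCEPT */
	       classify(false, false, true),	/* DROP_UNSIGNED */
	       classify(false, true, false),	/* DISPATCH_AO */
	       classify(true, false, false));	/* DISPATCH_MD5 */
	return 0;
}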
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
new file mode 100644
index 000000000000..471e177362b4
--- /dev/null
+++ b/include/net/tcp_ao.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _TCP_AO_H
+#define _TCP_AO_H
+
+#define TCP_AO_KEY_ALIGN	1
+#define __tcp_ao_key_align __aligned(TCP_AO_KEY_ALIGN)
+
+union tcp_ao_addr {
+	struct in_addr  a4;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct in6_addr	a6;
+#endif
+};
+
+struct tcp_ao_hdr {
+	u8	kind;
+	u8	length;
+	u8	keyid;
+	u8	rnext_keyid;
+};
+
+struct tcp_ao_counters {
+	atomic64_t	pkt_good;
+	atomic64_t	pkt_bad;
+	atomic64_t	key_not_found;
+	atomic64_t	ao_required;
+	atomic64_t	dropped_icmp;
+};
+
+struct tcp_ao_key {
+	struct hlist_node	node;
+	union tcp_ao_addr	addr;
+	u8			key[TCP_AO_MAXKEYLEN] __tcp_ao_key_align;
+	unsigned int		tcp_sigpool_id;
+	unsigned int		digest_size;
+	int			l3index;
+	u8			prefixlen;
+	u8			family;
+	u8			keylen;
+	u8			keyflags;
+	u8			sndid;
+	u8			rcvid;
+	u8			maclen;
+	struct rcu_head		rcu;
+	atomic64_t		pkt_good;
+	atomic64_t		pkt_bad;
+	u8			traffic_keys[];
+};
+
+static inline u8 *rcv_other_key(struct tcp_ao_key *key)
+{
+	return key->traffic_keys;
+}
+
+static inline u8 *snd_other_key(struct tcp_ao_key *key)
+{
+	return key->traffic_keys + key->digest_size;
+}
+
+static inline int tcp_ao_maclen(const struct tcp_ao_key *key)
+{
+	return key->maclen;
+}
+
+/* Use tcp_ao_len_aligned() for TCP header calculations */
+static inline int tcp_ao_len(const struct tcp_ao_key *key)
+{
+	return tcp_ao_maclen(key) + sizeof(struct tcp_ao_hdr);
+}
+
+static inline int tcp_ao_len_aligned(const struct tcp_ao_key *key)
+{
+	return round_up(tcp_ao_len(key), 4);
+}
+
+static inline unsigned int tcp_ao_digest_size(struct tcp_ao_key *key)
+{
+	return key->digest_size;
+}
+
+static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
+{
+	return sizeof(struct tcp_ao_key) + (key->digest_size << 1);
+}
+
+struct tcp_ao_info {
+	/* List of tcp_ao_key's */
+	struct hlist_head	head;
+	/* current_key and rnext_key aren't maintained on listen sockets.
+	 * Their purpose is to cache keys on established connections,
+	 * saving needless lookups. Never dereference any of them from
+	 * listen sockets.
+	 * ::current_key may change in RX to the key that was requested by
+	 * the peer, please use READ_ONCE()/WRITE_ONCE() in order to avoid
+	 * load/store tearing.
+	 * Do the same for ::rnext_key, if you don't hold socket lock
+	 * (it's changed only by userspace request in setsockopt()).
+	 */
+	struct tcp_ao_key	*current_key;
+	struct tcp_ao_key	*rnext_key;
+	struct tcp_ao_counters	counters;
+	u32			ao_required	:1,
+				accept_icmps	:1,
+				__unused	:30;
+	__be32			lisn;
+	__be32			risn;
+	/* The Sequence Number Extension (SNE) is the upper 4 bytes of SEQ;
+	 * it protects a TCP-AO connection from replayed old TCP segments.
+	 * See RFC5925 (6.2).
+	 * In order to get the correct SNE, there's a helper,
+	 * tcp_ao_compute_sne(). It needs a SEQ basis to tell where the
+	 * lower SEQ numbers lie. Relative to that basis vector, it can
+	 * provide an incremented SNE when SEQ rolls over, or a decremented
+	 * SNE when there's a retransmitted segment from before the rollover.
+	 * - for request sockets such a basis is rcv_isn/snt_isn, which seems
+	 *   good enough as it's not expected to receive 4 Gbytes on a reqsk.
+	 * - for full sockets the basis is rcv_nxt/snd_una. snd_una is
+	 *   taken instead of snd_nxt as currently it's easier to track
+	 *   in tcp_snd_una_update(), rather than updating SNE in all
+	 *   WRITE_ONCE(tp->snd_nxt, ...)
+	 * - for time-wait sockets the basis is tw_rcv_nxt/tw_snd_nxt.
+	 *   tw_snd_nxt is not expected to change, while tw_rcv_nxt may.
+	 */
+	u32			snd_sne;
+	u32			rcv_sne;
+	refcount_t		refcnt;		/* Protects twsk destruction */
+	struct rcu_head		rcu;
+};
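A short standalone sketch (model_ao_key is an illustrative stand-in, not the kernel struct) of the trailing-array layout that rcv_other_key(), snd_other_key() and tcp_ao_sizeof_key() above rely on. Note also that for the common 12-byte truncated MAC, tcp_ao_len() is 12 + 4 = 16 bytes, already a multiple of 4, so tcp_ao_len_aligned() adds no padding:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct model_ao_key {
	unsigned int digest_size;
	uint8_t traffic_keys[];	/* [0, ds): rcv key, [ds, 2*ds): snd key */
};

int main(void)
{
	unsigned int ds = 20;	/* e.g. an HMAC-SHA1 digest */
	/* mirrors tcp_ao_sizeof_key(): sizeof(struct) + (digest_size << 1) */
	struct model_ao_key *k = malloc(sizeof(*k) + (ds << 1));

	k->digest_size = ds;

	uint8_t *rcv = k->traffic_keys;			 /* rcv_other_key() */
	uint8_t *snd = k->traffic_keys + k->digest_size; /* snd_other_key() */

	assert(snd - rcv == (ptrdiff_t)ds);	/* keys sit back to back */
	free(k);
	return 0;
}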
+#ifdef CONFIG_TCP_MD5SIG
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_md5_needed;
+#define static_branch_tcp_md5()	static_branch_unlikely(&tcp_md5_needed.key)
+#else
+#define static_branch_tcp_md5()	false
+#endif
+#ifdef CONFIG_TCP_AO
+/* TCP-AO structures and functions */
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_ao_needed;
+#define static_branch_tcp_ao()	static_branch_unlikely(&tcp_ao_needed.key)
+#else
+#define static_branch_tcp_ao()	false
+#endif
+
+static inline bool tcp_hash_should_produce_warnings(void)
+{
+	return static_branch_tcp_md5() || static_branch_tcp_ao();
+}
+
+#define tcp_hash_fail(msg, family, skb, fmt, ...)			\
+do {									\
+	const struct tcphdr *th = tcp_hdr(skb);				\
+	char hdr_flags[6];						\
+	char *f = hdr_flags;						\
+									\
+	if (!tcp_hash_should_produce_warnings())			\
+		break;							\
+	if (th->fin)							\
+		*f++ = 'F';						\
+	if (th->syn)							\
+		*f++ = 'S';						\
+	if (th->rst)							\
+		*f++ = 'R';						\
+	if (th->psh)							\
+		*f++ = 'P';						\
+	if (th->ack)							\
+		*f++ = '.';						\
+	*f = 0;								\
+	if ((family) == AF_INET) {					\
+		net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
+				msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
+				&ip_hdr(skb)->daddr, ntohs(th->dest),	\
+				hdr_flags, ##__VA_ARGS__);		\
+	} else {							\
+		net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s] " fmt "\n", \
+				msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
+				&ipv6_hdr(skb)->daddr, ntohs(th->dest),	\
+				hdr_flags, ##__VA_ARGS__);		\
+	}								\
+} while (0)
+
+#ifdef CONFIG_TCP_AO
+/* TCP-AO structures and functions */
+struct tcp4_ao_context {
+	__be32		saddr;
+	__be32		daddr;
+	__be16		sport;
+	__be16		dport;
+	__be32		sisn;
+	__be32		disn;
+};
+
+struct tcp6_ao_context {
+	struct in6_addr	saddr;
+	struct in6_addr	daddr;
+	__be16		sport;
+	__be16		dport;
+	__be32		sisn;
+	__be32		disn;
+};
+
+struct tcp_sigpool;
+#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
+			    TCPF_CLOSE | TCPF_CLOSE_WAIT | \
+			    TCPF_LAST_ACK | TCPF_CLOSING)
+
+int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
+			struct tcp_ao_key *key, struct tcphdr *th,
+			__u8 *hash_location);
+int tcp_ao_hash_skb(unsigned short int family,
+		    char *ao_hash, struct tcp_ao_key *key,
+		    const struct sock *sk, const struct sk_buff *skb,
+		    const u8 *tkey, int hash_offset, u32 sne);
+int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
+		 sockptr_t optval, int optlen);
+struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
+					  int sndid, int rcvid);
+int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
+			     struct request_sock *req, struct
sk_buff *skb, + int family); +int tcp_ao_calc_traffic_key(struct tcp_ao_key *mkt, u8 *key, void *ctx, + unsigned int len, struct tcp_sigpool *hp); +void tcp_ao_destroy_sock(struct sock *sk, bool twsk); +void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp); +bool tcp_ao_ignore_icmp(const struct sock *sk, int family, int type, int code); +int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen); +int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen); +int tcp_ao_get_repair(struct sock *sk, sockptr_t optval, sockptr_t optlen); +int tcp_ao_set_repair(struct sock *sk, sockptr_t optval, unsigned int optlen); +enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk, + const struct sk_buff *skb, unsigned short int family, + const struct request_sock *req, int l3index, + const struct tcp_ao_hdr *aoh); +u32 tcp_ao_compute_sne(u32 next_sne, u32 next_seq, u32 seq); +struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, int l3index, + const union tcp_ao_addr *addr, + int family, int sndid, int rcvid); +int tcp_ao_hash_hdr(unsigned short family, char *ao_hash, + struct tcp_ao_key *key, const u8 *tkey, + const union tcp_ao_addr *daddr, + const union tcp_ao_addr *saddr, + const struct tcphdr *th, u32 sne); +int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb, + const struct tcp_ao_hdr *aoh, int l3index, u32 seq, + struct tcp_ao_key **key, char **traffic_key, + bool *allocated_traffic_key, u8 *keyid, u32 *sne); + +/* ipv4 specific functions */ +int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen); +struct tcp_ao_key *tcp_v4_ao_lookup(const struct sock *sk, struct sock *addr_sk, + int sndid, int rcvid); +int tcp_v4_ao_synack_hash(char *ao_hash, struct tcp_ao_key *mkt, + struct request_sock *req, const struct sk_buff *skb, + int hash_offset, u32 sne); +int tcp_v4_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key, + const struct sock *sk, + __be32 sisn, __be32 disn, bool send); +int tcp_v4_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key, + struct request_sock *req); +struct tcp_ao_key *tcp_v4_ao_lookup_rsk(const struct sock *sk, + struct request_sock *req, + int sndid, int rcvid); +int tcp_v4_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key, + const struct sock *sk, const struct sk_buff *skb, + const u8 *tkey, int hash_offset, u32 sne); +/* ipv6 specific functions */ +int tcp_v6_ao_hash_pseudoheader(struct tcp_sigpool *hp, + const struct in6_addr *daddr, + const struct in6_addr *saddr, int nbytes); +int tcp_v6_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key, + const struct sk_buff *skb, __be32 sisn, __be32 disn); +int tcp_v6_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key, + const struct sock *sk, __be32 sisn, + __be32 disn, bool send); +int tcp_v6_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key, + struct request_sock *req); +struct tcp_ao_key *tcp_v6_ao_lookup(const struct sock *sk, + struct sock *addr_sk, int sndid, int rcvid); +struct tcp_ao_key *tcp_v6_ao_lookup_rsk(const struct sock *sk, + struct request_sock *req, + int sndid, int rcvid); +int tcp_v6_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key, + const struct sock *sk, const struct sk_buff *skb, + const u8 *tkey, int hash_offset, u32 sne); +int tcp_v6_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen); +int tcp_v6_ao_synack_hash(char *ao_hash, struct tcp_ao_key *ao_key, + struct request_sock *req, const struct sk_buff *skb, + int hash_offset, u32 sne); +void tcp_ao_established(struct sock *sk); +void tcp_ao_finish_connect(struct 
sock *sk, struct sk_buff *skb); +void tcp_ao_connect_init(struct sock *sk); +void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb, + struct request_sock *req, unsigned short int family); +#else /* CONFIG_TCP_AO */ + +static inline int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb, + struct tcp_ao_key *key, struct tcphdr *th, + __u8 *hash_location) +{ + return 0; +} + +static inline void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb, + struct request_sock *req, unsigned short int family) +{ +} + +static inline bool tcp_ao_ignore_icmp(const struct sock *sk, int family, + int type, int code) +{ + return false; +} + +static inline enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk, + const struct sk_buff *skb, unsigned short int family, + const struct request_sock *req, int l3index, + const struct tcp_ao_hdr *aoh) +{ + return SKB_NOT_DROPPED_YET; +} + +static inline struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, + int l3index, const union tcp_ao_addr *addr, + int family, int sndid, int rcvid) +{ + return NULL; +} + +static inline void tcp_ao_destroy_sock(struct sock *sk, bool twsk) +{ +} + +static inline void tcp_ao_established(struct sock *sk) +{ +} + +static inline void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb) +{ +} + +static inline void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, + struct tcp_sock *tp) +{ +} + +static inline void tcp_ao_connect_init(struct sock *sk) +{ +} + +static inline int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen) +{ + return -ENOPROTOOPT; +} + +static inline int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen) +{ + return -ENOPROTOOPT; +} + +static inline int tcp_ao_get_repair(struct sock *sk, + sockptr_t optval, sockptr_t optlen) +{ + return -ENOPROTOOPT; +} + +static inline int tcp_ao_set_repair(struct sock *sk, + sockptr_t optval, unsigned int optlen) +{ + return -ENOPROTOOPT; +} +#endif + +#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) +int tcp_do_parse_auth_options(const struct tcphdr *th, + const u8 **md5_hash, const u8 **ao_hash); +#else +static inline int tcp_do_parse_auth_options(const struct tcphdr *th, + const u8 **md5_hash, const u8 **ao_hash) +{ + *md5_hash = NULL; + *ao_hash = NULL; + return 0; +} +#endif + +#endif /* _TCP_AO_H */ diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h index cc00118acca1..d60e8148ff4c 100644 --- a/include/net/tcp_states.h +++ b/include/net/tcp_states.h @@ -22,6 +22,7 @@ enum { TCP_LISTEN, TCP_CLOSING, /* Now a valid state */ TCP_NEW_SYN_RECV, + TCP_BOUND_INACTIVE, /* Pseudo-state for inet_diag */ TCP_MAX_STATES /* Leave at the end! 
*/ }; @@ -43,6 +44,7 @@ enum { TCPF_LISTEN = (1 << TCP_LISTEN), TCPF_CLOSING = (1 << TCP_CLOSING), TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV), + TCPF_BOUND_INACTIVE = (1 << TCP_BOUND_INACTIVE), }; #endif /* _LINUX_TCP_STATES_H */ diff --git a/include/net/tcx.h b/include/net/tcx.h index 264f147953ba..04be9377785d 100644 --- a/include/net/tcx.h +++ b/include/net/tcx.h @@ -38,16 +38,11 @@ static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry) return container_of(bundle, struct tcx_entry, bundle); } -static inline struct tcx_link *tcx_link(struct bpf_link *link) +static inline struct tcx_link *tcx_link(const struct bpf_link *link) { return container_of(link, struct tcx_link, link); } -static inline const struct tcx_link *tcx_link_const(const struct bpf_link *link) -{ - return tcx_link((struct bpf_link *)link); -} - void tcx_inc(void); void tcx_dec(void); diff --git a/include/net/tls.h b/include/net/tls.h index a2b44578dcb7..962f0c501111 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -61,7 +61,8 @@ struct tls_rec; #define TLS_AAD_SPACE_SIZE 13 -#define MAX_IV_SIZE 16 +#define TLS_MAX_IV_SIZE 16 +#define TLS_MAX_SALT_SIZE 4 #define TLS_TAG_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8 #define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE @@ -149,6 +150,7 @@ struct tls_record_info { skb_frag_t frags[MAX_SKB_FRAGS]; }; +#define TLS_DRIVER_STATE_SIZE_TX 16 struct tls_offload_context_tx { struct crypto_aead *aead_send; spinlock_t lock; /* protects records list */ @@ -162,17 +164,13 @@ struct tls_offload_context_tx { void (*sk_destruct)(struct sock *sk); struct work_struct destruct_work; struct tls_context *ctx; - u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough * driver specific state to justify another layer of indirection */ -#define TLS_DRIVER_STATE_SIZE_TX 16 + u8 driver_state[TLS_DRIVER_STATE_SIZE_TX] __aligned(8); }; -#define TLS_OFFLOAD_CONTEXT_SIZE_TX \ - (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX) - enum tls_context_flags { /* tls_device_down was called after the netdev went down, device state * was released, and kTLS works in software, even though rx_conf is @@ -193,8 +191,8 @@ enum tls_context_flags { }; struct cipher_context { - char *iv; - char *rec_seq; + char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE]; + char rec_seq[TLS_MAX_REC_SEQ_SIZE]; }; union tls_crypto_context { @@ -302,6 +300,7 @@ struct tls_offload_resync_async { u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX]; }; +#define TLS_DRIVER_STATE_SIZE_RX 8 struct tls_offload_context_rx { /* sw must be the first member of tls_offload_context_rx */ struct tls_sw_context_rx sw; @@ -325,17 +324,13 @@ struct tls_offload_context_rx { struct tls_offload_resync_async *resync_async; }; }; - u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough * driver specific state to justify another layer of indirection */ -#define TLS_DRIVER_STATE_SIZE_RX 8 + u8 driver_state[TLS_DRIVER_STATE_SIZE_RX] __aligned(8); }; -#define TLS_OFFLOAD_CONTEXT_SIZE_RX \ - (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX) - struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn); diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 0ca9b7a11baf..d716214fe03d 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -154,13 +154,30 @@ void udp_tunnel_xmit_skb(struct rtable 
*rt, struct sock *sk, struct sk_buff *skb int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, - struct net_device *dev, struct in6_addr *saddr, - struct in6_addr *daddr, + struct net_device *dev, + const struct in6_addr *saddr, + const struct in6_addr *daddr, __u8 prio, __u8 ttl, __be32 label, __be16 src_port, __be16 dst_port, bool nocheck); void udp_tunnel_sock_release(struct socket *sock); +struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb, + struct net_device *dev, + struct net *net, int oif, + __be32 *saddr, + const struct ip_tunnel_key *key, + __be16 sport, __be16 dport, u8 tos, + struct dst_cache *dst_cache); +struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb, + struct net_device *dev, + struct net *net, + struct socket *sock, int oif, + struct in6_addr *saddr, + const struct ip_tunnel_key *key, + __be16 sport, __be16 dport, u8 dsfield, + struct dst_cache *dst_cache); + struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, __be16 flags, __be64 tunnel_id, int md_size); @@ -174,16 +191,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) } #endif -static inline void udp_tunnel_encap_enable(struct socket *sock) +static inline void udp_tunnel_encap_enable(struct sock *sk) { - struct udp_sock *up = udp_sk(sock->sk); - - if (up->encap_enabled) + if (udp_test_and_set_bit(ENCAP_ENABLED, sk)) return; - up->encap_enabled = 1; #if IS_ENABLED(CONFIG_IPV6) - if (sock->sk->sk_family == PF_INET6) + if (READ_ONCE(sk->sk_family) == PF_INET6) ipv6_stub->udpv6_encap_enable(); #endif udp_encap_enable(); diff --git a/include/net/udplite.h b/include/net/udplite.h index bd33ff2b8f42..786919d29f8d 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -66,14 +66,18 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) /* Fast-path computation of checksum. Socket may not be locked. 
*/ static inline __wsum udplite_csum(struct sk_buff *skb) { - const struct udp_sock *up = udp_sk(skb->sk); const int off = skb_transport_offset(skb); + const struct sock *sk = skb->sk; int len = skb->len - off; - if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) { - if (0 < up->pcslen) - len = up->pcslen; - udp_hdr(skb)->len = htons(up->pcslen); + if (udp_test_bit(UDPLITE_SEND_CC, sk)) { + u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen); + + if (pcslen < len) { + if (pcslen > 0) + len = pcslen; + udp_hdr(skb)->len = htons(pcslen); + } } skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 6a9f8a5f387c..33ba6fc151cf 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -210,22 +210,23 @@ struct vxlan_rdst { }; struct vxlan_config { - union vxlan_addr remote_ip; - union vxlan_addr saddr; - __be32 vni; - int remote_ifindex; - int mtu; - __be16 dst_port; - u16 port_min; - u16 port_max; - u8 tos; - u8 ttl; - __be32 label; - u32 flags; - unsigned long age_interval; - unsigned int addrmax; - bool no_share; - enum ifla_vxlan_df df; + union vxlan_addr remote_ip; + union vxlan_addr saddr; + __be32 vni; + int remote_ifindex; + int mtu; + __be16 dst_port; + u16 port_min; + u16 port_max; + u8 tos; + u8 ttl; + __be32 label; + enum ifla_vxlan_label_policy label_policy; + u32 flags; + unsigned long age_interval; + unsigned int addrmax; + bool no_share; + enum ifla_vxlan_df df; }; enum { diff --git a/include/net/xdp.h b/include/net/xdp.h index de08c8e0d134..e6770dd40c91 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -16,7 +16,7 @@ * * The XDP RX-queue info (xdp_rxq_info) is associated with the driver * level RX-ring queues. It is information that is specific to how - * the driver have configured a given RX-ring queue. + * the driver has configured a given RX-ring queue. * * Each xdp_buff frame received in the driver carries a (pointer) * reference to this xdp_rxq_info structure. This provides the XDP @@ -32,7 +32,7 @@ * The struct is not directly tied to the XDP prog. A new XDP prog * can be attached as long as it doesn't change the underlying * RX-ring. If the RX-ring does change significantly, the NIC driver - * naturally need to stop the RX-ring before purging and reallocating + * naturally needs to stop the RX-ring before purging and reallocating * memory. In that process the driver MUST call unregister (which * also applies for driver shutdown and unload). The register API is * also mandatory during RX-ring setup. 
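[Editor's note: the xdp_rxq_info comment above spells out a hard driver contract (register at RX-ring setup, unregister before purging/reallocating). A minimal sketch of what that looks like from a hypothetical driver follows; struct foo_rxq and the foo_* functions are invented for illustration, while xdp_rxq_info_reg(), xdp_rxq_info_reg_mem_model() and xdp_rxq_info_unreg() are the existing net/xdp.h helpers the comment refers to.]

/* Hypothetical RX-ring setup/teardown honoring the xdp_rxq_info contract;
 * assumes #include <net/xdp.h>. Error handling trimmed for brevity. */
struct foo_rxq {
        struct xdp_rxq_info xdp_rxq;    /* one per RX-ring */
        unsigned int napi_id;
};

static int foo_setup_rxring(struct foo_rxq *rxq, struct net_device *dev,
                            u32 queue_index)
{
        int err;

        /* "The register API is also mandatory during RX-ring setup." */
        err = xdp_rxq_info_reg(&rxq->xdp_rxq, dev, queue_index, rxq->napi_id);
        if (err)
                return err;

        return xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
                                          MEM_TYPE_PAGE_SHARED, NULL);
}

static void foo_teardown_rxring(struct foo_rxq *rxq)
{
        /* "the driver MUST call unregister" before purging/reallocating */
        xdp_rxq_info_unreg(&rxq->xdp_rxq);
}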
@@ -369,7 +369,12 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp) static inline bool xdp_metalen_invalid(unsigned long metalen) { - return (metalen & (sizeof(__u32) - 1)) || (metalen > 32); + unsigned long meta_max; + + meta_max = type_max(typeof_member(struct skb_shared_info, meta_len)); + BUILD_BUG_ON(!__builtin_constant_p(meta_max)); + + return !IS_ALIGNED(metalen, sizeof(u32)) || metalen > meta_max; } struct xdp_attachment_info { @@ -383,14 +388,29 @@ void xdp_attachment_setup(struct xdp_attachment_info *info, #define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE +/* Define the relationship between xdp-rx-metadata kfunc and + * various other entities: + * - xdp_rx_metadata enum + * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml) + * - kfunc name + * - xdp_metadata_ops field + */ #define XDP_METADATA_KFUNC_xxx \ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \ - bpf_xdp_metadata_rx_timestamp) \ + NETDEV_XDP_RX_METADATA_TIMESTAMP, \ + bpf_xdp_metadata_rx_timestamp, \ + xmo_rx_timestamp) \ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \ - bpf_xdp_metadata_rx_hash) \ - -enum { -#define XDP_METADATA_KFUNC(name, _) name, + NETDEV_XDP_RX_METADATA_HASH, \ + bpf_xdp_metadata_rx_hash, \ + xmo_rx_hash) \ + XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_VLAN_TAG, \ + NETDEV_XDP_RX_METADATA_VLAN_TAG, \ + bpf_xdp_metadata_rx_vlan_tag, \ + xmo_rx_vlan_tag) \ + +enum xdp_rx_metadata { +#define XDP_METADATA_KFUNC(name, _, __, ___) name, XDP_METADATA_KFUNC_xxx #undef XDP_METADATA_KFUNC MAX_XDP_METADATA_KFUNC, @@ -416,6 +436,7 @@ enum xdp_rss_hash_type { XDP_RSS_L4_UDP = BIT(5), XDP_RSS_L4_SCTP = BIT(6), XDP_RSS_L4_IPSEC = BIT(7), /* L4 based hash include IPSEC SPI */ + XDP_RSS_L4_ICMP = BIT(8), /* Second part: RSS hash type combinations used for driver HW mapping */ XDP_RSS_TYPE_NONE = 0, @@ -431,11 +452,13 @@ enum xdp_rss_hash_type { XDP_RSS_TYPE_L4_IPV4_UDP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP, XDP_RSS_TYPE_L4_IPV4_SCTP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP, XDP_RSS_TYPE_L4_IPV4_IPSEC = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC, + XDP_RSS_TYPE_L4_IPV4_ICMP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_ICMP, XDP_RSS_TYPE_L4_IPV6_TCP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP, XDP_RSS_TYPE_L4_IPV6_UDP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP, XDP_RSS_TYPE_L4_IPV6_SCTP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP, XDP_RSS_TYPE_L4_IPV6_IPSEC = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC, + XDP_RSS_TYPE_L4_IPV6_ICMP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_ICMP, XDP_RSS_TYPE_L4_IPV6_TCP_EX = XDP_RSS_TYPE_L4_IPV6_TCP | XDP_RSS_L3_DYNHDR, XDP_RSS_TYPE_L4_IPV6_UDP_EX = XDP_RSS_TYPE_L4_IPV6_UDP | XDP_RSS_L3_DYNHDR, @@ -446,6 +469,8 @@ struct xdp_metadata_ops { int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp); int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash, enum xdp_rss_hash_type *rss_type); + int (*xmo_rx_vlan_tag)(const struct xdp_md *ctx, __be16 *vlan_proto, + u16 *vlan_tci); }; #ifdef CONFIG_NET diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 1617af380162..3cb4dc9bd70e 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -14,6 +14,8 @@ #include <linux/mm.h> #include <net/sock.h> +#define XDP_UMEM_SG_FLAG (1 << 1) + struct net_device; struct xsk_queue; struct xdp_buff; @@ -28,6 +30,7 @@ struct xdp_umem { struct user_struct *user; refcount_t users; u8 flags; + u8 tx_metadata_len; bool zc; struct page **pgs; int id; @@ -61,6 +64,13 @@ struct xdp_sock { struct xsk_queue *tx 
____cacheline_aligned_in_smp; struct list_head tx_list; + /* record the number of tx descriptors sent by this xsk and + * when it exceeds MAX_PER_SOCKET_BUDGET, an opportunity needs + * to be given to other xsks for sending tx descriptors, thereby + * preventing other XSKs from being starved. + */ + u32 tx_budget_spent; + /* Protects generic receive. */ spinlock_t rx_lock; @@ -83,12 +93,105 @@ struct xdp_sock { struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */ }; +/* + * AF_XDP TX metadata hooks for network devices. + * The following hooks can be defined; unless noted otherwise, they are + * optional and can be filled with a null pointer. + * + * void (*tmo_request_timestamp)(void *priv) + * Called when AF_XDP frame requested egress timestamp. + * + * u64 (*tmo_fill_timestamp)(void *priv) + * Called when AF_XDP frame, that had requested egress timestamp, + * received a completion. The hook needs to return the actual HW timestamp. + * + * void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv) + * Called when AF_XDP frame requested HW checksum offload. csum_start + * indicates position where checksumming should start. + * csum_offset indicates position where checksum should be stored. + * + */ +struct xsk_tx_metadata_ops { + void (*tmo_request_timestamp)(void *priv); + u64 (*tmo_fill_timestamp)(void *priv); + void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv); +}; + #ifdef CONFIG_XDP_SOCKETS int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp); void __xsk_map_flush(void); +/** + * xsk_tx_metadata_to_compl - Save enough relevant metadata information + * to perform tx completion in the future. + * @meta: pointer to AF_XDP metadata area + * @compl: pointer to output struct xsk_tx_metadata_compl + * + * This function should be called by the networking device when + * it prepares AF_XDP egress packet. The value of @compl should be stored + * and passed to xsk_tx_metadata_complete upon TX completion. + */ +static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta, + struct xsk_tx_metadata_compl *compl) +{ + if (!meta) + return; + + if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP) + compl->tx_timestamp = &meta->completion.tx_timestamp; + else + compl->tx_timestamp = NULL; +} + +/** + * xsk_tx_metadata_request - Evaluate AF_XDP TX metadata at submission + * and call appropriate xsk_tx_metadata_ops operation. + * @meta: pointer to AF_XDP metadata area + * @ops: pointer to struct xsk_tx_metadata_ops + * @priv: pointer to driver-private area + * + * This function should be called by the networking device when + * it prepares AF_XDP egress packet. + */ +static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta, + const struct xsk_tx_metadata_ops *ops, + void *priv) +{ + if (!meta) + return; + + if (ops->tmo_request_timestamp) + if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP) + ops->tmo_request_timestamp(priv); + + if (ops->tmo_request_checksum) + if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) + ops->tmo_request_checksum(meta->request.csum_start, + meta->request.csum_offset, priv); +}
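[Editor's note: the request/complete pairing documented above is easiest to see from the driver side. Below is a short, hypothetical sketch of that flow; the foo_* names and struct foo_tx_compl are invented, and only xsk_buff_get_metadata() (declared further down in xdp_sock_drv.h), xsk_tx_metadata_request(), xsk_tx_metadata_to_compl() and xsk_tx_metadata_complete() are the helpers this diff introduces.]

/* Hypothetical driver TX path wiring up the hooks above; not taken from
 * any in-tree driver. Assumes #include <net/xdp_sock_drv.h>. */
struct foo_tx_compl {
        struct xsk_tx_metadata_compl xsk;       /* saved at submission time */
};

static void foo_tmo_request_timestamp(void *priv)
{
        /* arm HW egress timestamping for this descriptor */
}

static u64 foo_tmo_fill_timestamp(void *priv)
{
        return 0;       /* would read back the HW timestamp here */
}

static const struct xsk_tx_metadata_ops foo_xsk_tmo = {
        .tmo_request_timestamp = foo_tmo_request_timestamp,
        .tmo_fill_timestamp    = foo_tmo_fill_timestamp,
        /* .tmo_request_checksum left NULL: the hooks are optional */
};

/* submission: evaluate the request flags, then save completion state */
static void foo_xmit_zc(struct xsk_buff_pool *pool, struct xdp_desc *desc,
                        struct foo_tx_compl *compl)
{
        struct xsk_tx_metadata *meta;

        compl->xsk.tx_timestamp = NULL; /* in case no metadata is attached */
        meta = xsk_buff_get_metadata(pool, desc->addr);
        xsk_tx_metadata_request(meta, &foo_xsk_tmo, compl);
        xsk_tx_metadata_to_compl(meta, &compl->xsk);
}

/* completion: fill in the timestamp only if one was requested, since
 * xsk_tx_metadata_complete() dereferences compl->tx_timestamp */
static void foo_tx_done(struct foo_tx_compl *compl)
{
        if (compl->xsk.tx_timestamp)
                xsk_tx_metadata_complete(&compl->xsk, &foo_xsk_tmo, compl);
}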
+ +/** + * xsk_tx_metadata_complete - Evaluate AF_XDP TX metadata at completion + * and call appropriate xsk_tx_metadata_ops operation. + * @compl: pointer to completion metadata produced from xsk_tx_metadata_to_compl + * @ops: pointer to struct xsk_tx_metadata_ops + * @priv: pointer to driver-private area + * + * This function should be called by the networking device upon + * AF_XDP egress completion. + */ +static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl, + const struct xsk_tx_metadata_ops *ops, + void *priv) +{ + if (!compl) + return; + + *compl->tx_timestamp = ops->tmo_fill_timestamp(priv); +} + #else static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) @@ -105,6 +208,32 @@ static inline void __xsk_map_flush(void) { } +static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta, + struct xsk_tx_metadata_compl *compl) +{ +} + +static inline void xsk_tx_metadata_request(struct xsk_tx_metadata *meta, + const struct xsk_tx_metadata_ops *ops, + void *priv) +{ +} + +static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl, + const struct xsk_tx_metadata_ops *ops, + void *priv) +{ +} + #endif /* CONFIG_XDP_SOCKETS */ +#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET) +bool xsk_map_check_flush(void); +#else +static inline bool xsk_map_check_flush(void) +{ + return false; +} +#endif + #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 1f6fc8c7a84c..c9aec9ab6191 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -12,6 +12,12 @@ #define XDP_UMEM_MIN_CHUNK_SHIFT 11 #define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT) +struct xsk_cb_desc { + void *src; + u8 off; + u8 bytes; +}; + #ifdef CONFIG_XDP_SOCKETS void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries); @@ -47,6 +53,12 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool, xp_set_rxq_info(pool, rxq); } +static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool, + struct xsk_cb_desc *desc) +{ + xp_fill_cb(pool, desc); +} + static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool) { #ifdef CONFIG_NET_RX_BUSY_POLL @@ -147,11 +159,29 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) return ret; } +static inline void xsk_buff_del_tail(struct xdp_buff *tail) +{ + struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp); + + list_del(&xskb->xskb_list_node); +} + +static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) +{ + struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp); + struct xdp_buff_xsk *frag; + + frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk, + xskb_list_node); + return &frag->xdp; +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; xdp->data_meta = xdp->data; xdp->data_end = xdp->data + size; + xdp->flags = 0; } static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool, @@ -165,6 +195,30 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr) return xp_raw_get_data(pool, addr); } +#define XDP_TXMD_FLAGS_VALID ( \ + XDP_TXMD_FLAGS_TIMESTAMP | \ + XDP_TXMD_FLAGS_CHECKSUM | \ + 0) + +static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta) +{ + return !(meta->flags & ~XDP_TXMD_FLAGS_VALID); +} + +static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr) +{ + struct xsk_tx_metadata *meta; + + if (!pool->tx_metadata_len) + return NULL; + + meta =
xp_raw_get_data(pool, addr) - pool->tx_metadata_len; + if (unlikely(!xsk_buff_valid_tx_metadata(meta))) + return NULL; /* no way to signal the error to the user */ + + return meta; +} + static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool) { struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); @@ -250,6 +304,11 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool, { } +static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool, + struct xsk_cb_desc *desc) +{ +} + static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool) { return 0; @@ -309,6 +368,15 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) return NULL; } +static inline void xsk_buff_del_tail(struct xdp_buff *tail) +{ +} + +static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) +{ + return NULL; +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { } @@ -324,6 +392,16 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr) return NULL; } +static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta) +{ + return false; +} + +static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr) +{ + return NULL; +} + static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool) { } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 363c7d510554..1d107241b901 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1207,20 +1207,20 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1); } -int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, +int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, unsigned int family, int reverse); -static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, +static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, unsigned int family) { - return __xfrm_decode_session(skb, fl, family, 0); + return __xfrm_decode_session(net, skb, fl, family, 0); } -static inline int xfrm_decode_session_reverse(struct sk_buff *skb, +static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb, struct flowi *fl, unsigned int family) { - return __xfrm_decode_session(skb, fl, family, 1); + return __xfrm_decode_session(net, skb, fl, family, 1); } int __xfrm_route_forward(struct sk_buff *skb, unsigned short family); @@ -1296,7 +1296,7 @@ static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *sk { return 1; } -static inline int xfrm_decode_session_reverse(struct sk_buff *skb, +static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb, struct flowi *fl, unsigned int family) { @@ -1669,7 +1669,6 @@ int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb); #endif void xfrm_local_error(struct sk_buff *skb, int mtu); -int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm4_transport_finish(struct sk_buff *skb, int async); @@ -1689,7 +1688,6 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char prot int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); void 
xfrm4_local_error(struct sk_buff *skb, u32 mtu); -int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, struct ip6_tnl *t); int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, @@ -1712,6 +1710,10 @@ int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); +struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + struct sk_buff *skb); +struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + struct sk_buff *skb); int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen); #else @@ -2166,7 +2168,7 @@ static inline bool xfrm6_local_dontfrag(const struct sock *sk) proto = sk->sk_protocol; if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) - return inet6_sk(sk)->dontfrag; + return inet6_test_bit(DONTFRAG, sk); return false; } @@ -2188,4 +2190,13 @@ static inline int register_xfrm_interface_bpf(void) #endif +#if IS_ENABLED(CONFIG_DEBUG_INFO_BTF) +int register_xfrm_state_bpf(void); +#else +static inline int register_xfrm_state_bpf(void) +{ + return 0; +} +#endif + #endif /* _NET_XFRM_H */ diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index b0bdff26fc88..99dd7376df6a 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -12,6 +12,7 @@ struct xsk_buff_pool; struct xdp_rxq_info; +struct xsk_cb_desc; struct xsk_queue; struct xdp_desc; struct xdp_umem; @@ -33,6 +34,7 @@ struct xdp_buff_xsk { }; #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb)) +#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t)) struct xsk_dma_map { dma_addr_t *dma_pages; @@ -77,10 +79,12 @@ struct xsk_buff_pool { u32 chunk_size; u32 chunk_shift; u32 frame_len; + u8 tx_metadata_len; /* inherited from umem */ u8 cached_need_wakeup; bool uses_need_wakeup; bool dma_need_sync; bool unaligned; + bool tx_sw_csum; void *addrs; /* Mutual exclusion of the completion ring in the SKB mode. 
Two cases to protect: * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when @@ -132,6 +136,7 @@ static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_p /* AF_XDP ZC drivers, via xdp_sock_buff.h */ void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq); +void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc); int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, unsigned long attrs, struct page **pages, u32 nr_pages); void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs); @@ -233,4 +238,9 @@ static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb) return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); } +static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool) +{ + return pool->tx_metadata_len > 0; +} + #endif /* XSK_BUFF_POOL_H_ */ diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 2e3843b761e8..3f1b58d8b4bf 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -277,6 +277,8 @@ enum ib_port_capability_mask2_bits { IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4, IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5, IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10, + IB_PORT_EXTENDED_SPEEDS2_SUP = 1 << 11, + IB_PORT_LINK_SPEED_XDR_SUP = 1 << 12, }; #define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26) diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 95896472a82b..565a85044541 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -77,6 +77,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter, { __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl, umem->sgt_append.sgt.nents, pgsz); + biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1); + biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz); +} + +static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter) +{ + return __rdma_block_iter_next(biter) && biter->__sg_numblocks--; } /** @@ -92,7 +99,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter, */ #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \ for (__rdma_umem_block_iter_start(biter, umem, pgsz); \ - __rdma_block_iter_next(biter);) + __rdma_umem_block_iter_next(biter);) #ifdef CONFIG_INFINIBAND_USER_MEM diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 533ab92684d8..b7b6b58dd348 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -561,6 +561,7 @@ enum ib_port_speed { IB_SPEED_EDR = 32, IB_SPEED_HDR = 64, IB_SPEED_NDR = 128, + IB_SPEED_XDR = 256, }; enum ib_stat_flag { @@ -607,7 +608,7 @@ struct rdma_hw_stats { const struct rdma_stat_desc *descs; unsigned long *is_disabled; int num_counters; - u64 value[]; + u64 value[] __counted_by(num_counters); }; #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10 @@ -840,6 +841,7 @@ enum ib_rate { IB_RATE_50_GBPS = 20, IB_RATE_400_GBPS = 21, IB_RATE_600_GBPS = 22, + IB_RATE_800_GBPS = 23, }; /** @@ -1094,7 +1096,7 @@ struct ib_qp_cap { /* * Maximum number of rdma_rw_ctx structures in flight at a time. - * ib_create_qp() will calculate the right amount of neededed WRs + * ib_create_qp() will calculate the right amount of needed WRs * and MRs based on this. 
*/ u32 max_rdma_ctxs; @@ -2608,6 +2610,8 @@ struct ib_device_ops { int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp); int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id); + int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq); + int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq); /* Device lifecycle callbacks */ /* @@ -2846,6 +2850,7 @@ struct ib_block_iter { /* internal states */ struct scatterlist *__sg; /* sg holding the current aligned block */ dma_addr_t __dma_addr; /* unaligned DMA address of this block */ + size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */ unsigned int __sg_nents; /* number of SG entries */ unsigned int __sg_advance; /* number of bytes to advance in sg in next step */ unsigned int __pg_bit; /* alignment of current block */ diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 8a43534eea5c..f5257103fdb6 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -404,8 +404,6 @@ cmd_to_domain_dev(struct scsi_cmnd *cmd) return sdev_to_domain_dev(cmd->device); } -void sas_hash_addr(u8 *hashed, const u8 *sas_addr); - /* Before calling a notify event, LLDD should use this function * when the link is severed (possibly from its tasklet). * The idea is that the Class only reads those, while the LLDD, @@ -681,7 +679,6 @@ extern void sas_resume_ha(struct sas_ha_struct *sas_ha); extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha); extern void sas_suspend_ha(struct sas_ha_struct *sas_ha); -int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates); int sas_phy_reset(struct sas_phy *phy, int hard_reset); int sas_phy_enable(struct sas_phy *phy, int enable); extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); @@ -699,20 +696,6 @@ extern struct scsi_transport_template * sas_domain_attach_transport(struct sas_domain_function_template *); extern struct device_attribute dev_attr_phy_event_threshold; -int sas_discover_root_expander(struct domain_device *); - -int sas_ex_revalidate_domain(struct domain_device *); - -void sas_unregister_domain_devices(struct asd_sas_port *port, int gone); -void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *); -void sas_discover_event(struct asd_sas_port *, enum discover_event ev); - -int sas_discover_end_dev(struct domain_device *); - -void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *); - -void sas_init_dev(struct domain_device *); - void sas_task_abort(struct sas_task *); int sas_eh_abort_handler(struct scsi_cmnd *cmd); int sas_eh_device_reset_handler(struct scsi_cmnd *cmd); diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index ec093594ba53..4498f845b112 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -157,6 +157,9 @@ enum scsi_disposition { #define SCSI_3 4 /* SPC */ #define SCSI_SPC_2 5 #define SCSI_SPC_3 6 +#define SCSI_SPC_4 7 +#define SCSI_SPC_5 8 +#define SCSI_SPC_6 14 /* * INQ PERIPHERAL QUALIFIERS diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index b9230b6add04..5ec1e71a09de 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -161,6 +161,32 @@ struct scsi_device { * pass settings from slave_alloc to scsi * core. 
*/ unsigned int eh_timeout; /* Error handling timeout */ + + /* + * If true, let the high-level device driver (sd) manage the device + * power state for system suspend/resume (suspend to RAM and + * hibernation) operations. + */ + unsigned manage_system_start_stop:1; + + /* + * If true, let the high-level device driver (sd) manage the device + * power state for runtime device suspend and resume operations. + */ + unsigned manage_runtime_start_stop:1; + + /* + * If true, let the high-level device driver (sd) manage the device + * power state for system shutdown (power off) operations. + */ + unsigned manage_shutdown:1; + + /* + * If set and if the device is runtime suspended, ask the high-level + * device driver (sd) to force a runtime resume of the device. + */ + unsigned force_runtime_start_on_system_start:1; + unsigned removable:1; unsigned changed:1; /* Data invalid due to media change */ unsigned busy:1; /* Used to prevent races */ @@ -193,8 +219,6 @@ struct scsi_device { unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */ unsigned no_start_on_add:1; /* do not issue start on add */ unsigned allow_restart:1; /* issue START_UNIT in error handler */ - unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */ - unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */ unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ unsigned select_no_atn:1; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index a2b8d30c4c80..3b907fc2ef08 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -245,6 +245,9 @@ struct scsi_host_template { * midlayer calls this point so that the driver may deallocate * and terminate any references to the target. + * + * Note: This callback is called with the host lock held and hence + * must not sleep. + * + * Status: OPTIONAL */ void (* target_destroy)(struct scsi_target *); @@ -764,7 +767,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht); #define scsi_template_proc_dir(sht) NULL #endif extern void scsi_scan_host(struct Scsi_Host *); -extern void scsi_rescan_device(struct device *); +extern int scsi_rescan_device(struct scsi_device *sdev); extern void scsi_remove_host(struct Scsi_Host *); extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); extern int scsi_host_busy(struct Scsi_Host *shost); diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index eb5079904cc8..af793f2a0ec4 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -258,7 +258,7 @@ static inline int qe_alive_during_sleep(void) /* Structure that defines QE firmware binary files. * - * See Documentation/powerpc/qe_firmware.rst for a description of these + * See Documentation/arch/powerpc/qe_firmware.rst for a description of these * fields.
*/ struct qe_firmware { diff --git a/include/soc/fsl/qe/qmc.h b/include/soc/fsl/qe/qmc.h index 3c61a50d2ae2..2a333fc1ea81 100644 --- a/include/soc/fsl/qe/qmc.h +++ b/include/soc/fsl/qe/qmc.h @@ -9,6 +9,7 @@ #ifndef __SOC_FSL_QMC_H__ #define __SOC_FSL_QMC_H__ +#include <linux/bits.h> #include <linux/types.h> struct device_node; @@ -16,9 +17,11 @@ struct device; struct qmc_chan; struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name); +struct qmc_chan *qmc_chan_get_bychild(struct device_node *np); void qmc_chan_put(struct qmc_chan *chan); struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev, struct device_node *np, const char *phandle_name); +struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev, struct device_node *np); enum qmc_mode { QMC_TRANSPARENT, @@ -37,6 +40,16 @@ struct qmc_chan_info { int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info); +struct qmc_chan_ts_info { + u64 rx_ts_mask_avail; + u64 tx_ts_mask_avail; + u64 rx_ts_mask; + u64 tx_ts_mask; +}; + +int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info); +int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info); + struct qmc_chan_param { enum qmc_mode mode; union { @@ -56,8 +69,20 @@ int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length, void (*complete)(void *context), void *context); +/* Flags available (ORed) for read complete() flags parameter in HDLC mode. + * No flags are available in transparent mode and the read complete() flags + * parameter has no meaning in transparent mode. + */ +#define QMC_RX_FLAG_HDLC_LAST BIT(11) /* Last in frame */ +#define QMC_RX_FLAG_HDLC_FIRST BIT(10) /* First in frame */ +#define QMC_RX_FLAG_HDLC_OVF BIT(5) /* Data overflow */ +#define QMC_RX_FLAG_HDLC_UNA BIT(4) /* Unaligned (i.e. bits received not multiple of 8) */ +#define QMC_RX_FLAG_HDLC_ABORT BIT(3) /* Received an abort sequence (seven consecutive ones) */ +#define QMC_RX_FLAG_HDLC_CRC BIT(2) /* CRC error */ + int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length, - void (*complete)(void *context, size_t length), + void (*complete)(void *context, size_t length, + unsigned int flags), void *context);
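[Editor's note: to make the new three-argument completion callback concrete, here is a short hypothetical sketch of an HDLC consumer; struct foo_chan and the foo_* helpers are invented, while qmc_chan_read_submit() and the QMC_RX_FLAG_HDLC_* flags are the ones defined above.]

/* Hypothetical read-completion callback using the HDLC flags above;
 * assumes #include <soc/fsl/qe/qmc.h>. */
struct foo_chan {
        struct qmc_chan *qmc;
};

static void foo_drop_frame(struct foo_chan *chan) { /* invented error path */ }
static void foo_deliver_frame(struct foo_chan *chan, size_t len) { /* invented */ }

static void foo_rx_complete(void *context, size_t length, unsigned int flags)
{
        struct foo_chan *chan = context;

        if (flags & (QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA |
                     QMC_RX_FLAG_HDLC_ABORT | QMC_RX_FLAG_HDLC_CRC)) {
                foo_drop_frame(chan);
                return;
        }

        if (flags & QMC_RX_FLAG_HDLC_LAST)
                foo_deliver_frame(chan, length);        /* frame is complete */
}

static int foo_submit_rx(struct foo_chan *chan, dma_addr_t addr, size_t len)
{
        return qmc_chan_read_submit(chan->qmc, addr, len,
                                    foo_rx_complete, chan);
}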
#define QMC_CHAN_READ (1<<0) diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h index f916dcde457f..09722f83b0ca 100644 --- a/include/soc/microchip/mpfs.h +++ b/include/soc/microchip/mpfs.h @@ -38,6 +38,8 @@ int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, struct mp struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev); +struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_client); + #endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */ #if IS_ENABLED(CONFIG_MCHP_CLK_MPFS) diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h index c47cc71a999e..17a0a8c3d656 100644 --- a/include/soc/qcom/qcom-spmi-pmic.h +++ b/include/soc/qcom/qcom-spmi-pmic.h @@ -31,6 +31,7 @@ #define PM8998_SUBTYPE 0x14 #define PMI8998_SUBTYPE 0x15 #define PM8005_SUBTYPE 0x18 +#define PM8937_SUBTYPE 0x19 #define PM660L_SUBTYPE 0x1a #define PM660_SUBTYPE 0x1b #define PM8150_SUBTYPE 0x1e diff --git a/include/soc/rockchip/rk3399_grf.h b/include/soc/rockchip/rk3399_grf.h index 3eebabcb2812..39cd44cec982 100644 --- a/include/soc/rockchip/rk3399_grf.h +++ b/include/soc/rockchip/rk3399_grf.h @@ -11,11 +11,8 @@ /* PMU GRF Registers */ #define RK3399_PMUGRF_OS_REG2 0x308 -#define RK3399_PMUGRF_DDRTYPE_SHIFT 13 -#define RK3399_PMUGRF_DDRTYPE_MASK 7 -#define RK3399_PMUGRF_DDRTYPE_DDR3 3 -#define RK3399_PMUGRF_DDRTYPE_LPDDR2 5 -#define RK3399_PMUGRF_DDRTYPE_LPDDR3 6 -#define RK3399_PMUGRF_DDRTYPE_LPDDR4 7 +#define RK3399_PMUGRF_OS_REG2_DDRTYPE GENMASK(15, 13) +#define RK3399_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2) +#define RK3399_PMUGRF_OS_REG2_BW_CH1 GENMASK(19, 18) #endif diff --git a/include/soc/rockchip/rk3568_grf.h b/include/soc/rockchip/rk3568_grf.h new file mode 100644 index 000000000000..52853efd6720 --- /dev/null +++ b/include/soc/rockchip/rk3568_grf.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef __SOC_RK3568_GRF_H +#define __SOC_RK3568_GRF_H + +#define RK3568_PMUGRF_OS_REG2 0x208 +#define RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO GENMASK(15, 13) +#define RK3568_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2) + +#define RK3568_PMUGRF_OS_REG3 0x20c +#define RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12) +#define RK3568_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28) + +#endif /* __SOC_RK3568_GRF_H */ diff --git a/include/soc/rockchip/rk3588_grf.h b/include/soc/rockchip/rk3588_grf.h new file mode 100644 index 000000000000..630b35a55064 --- /dev/null +++ b/include/soc/rockchip/rk3588_grf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef __SOC_RK3588_GRF_H +#define __SOC_RK3588_GRF_H + +#define RK3588_PMUGRF_OS_REG2 0x208 +#define RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO GENMASK(15, 13) +#define RK3588_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2) +#define RK3588_PMUGRF_OS_REG2_BW_CH1 GENMASK(19, 18) +#define RK3588_PMUGRF_OS_REG2_CH_INFO GENMASK(29, 28) + +#define RK3588_PMUGRF_OS_REG3 0x20c +#define RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12) +#define RK3588_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28) + +#define RK3588_PMUGRF_OS_REG4 0x210 +#define RK3588_PMUGRF_OS_REG5 0x214 + +#endif
/* __SOC_RK3588_GRF_H */ diff --git a/include/soc/rockchip/rockchip_grf.h b/include/soc/rockchip/rockchip_grf.h new file mode 100644 index 000000000000..e46fd72aea8d --- /dev/null +++ b/include/soc/rockchip/rockchip_grf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Rockchip General Register Files definitions + */ + +#ifndef __SOC_ROCKCHIP_GRF_H +#define __SOC_ROCKCHIP_GRF_H + +/* Rockchip DDRTYPE defines */ +enum { + ROCKCHIP_DDRTYPE_DDR3 = 3, + ROCKCHIP_DDRTYPE_LPDDR2 = 5, + ROCKCHIP_DDRTYPE_LPDDR3 = 6, + ROCKCHIP_DDRTYPE_LPDDR4 = 7, + ROCKCHIP_DDRTYPE_LPDDR4X = 8, +}; + +#endif /* __SOC_ROCKCHIP_GRF_H */ diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h index ecefcaec7e66..6b995a8f0f6d 100644 --- a/include/soc/tegra/bpmp-abi.h +++ b/include/soc/tegra/bpmp-abi.h @@ -1194,7 +1194,7 @@ struct cmd_clk_is_enabled_request { */ struct cmd_clk_is_enabled_response { /** - * @brief The state of the clock that has been succesfully + * @brief The state of the clock that has been successfully * requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the * master invoking the command earlier. * diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h index 5842e38bb288..f5e4ac5b8cce 100644 --- a/include/soc/tegra/bpmp.h +++ b/include/soc/tegra/bpmp.h @@ -102,8 +102,12 @@ struct tegra_bpmp { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_mirror; #endif + + bool suspended; }; +#define TEGRA_BPMP_MESSAGE_RESET BIT(0) + struct tegra_bpmp_message { unsigned int mrq; @@ -117,6 +121,8 @@ struct tegra_bpmp_message { size_t size; int ret; } rx; + + unsigned long flags; }; #if IS_ENABLED(CONFIG_TEGRA_BPMP) diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index a5ef84944a06..af1d73a7f0cd 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -96,7 +96,6 @@ struct tegra_smmu_soc { struct tegra_mc; struct tegra_smmu; -struct gart_device; #ifdef CONFIG_TEGRA_IOMMU_SMMU struct tegra_smmu *tegra_smmu_probe(struct device *dev, @@ -116,28 +115,6 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu) } #endif -#ifdef CONFIG_TEGRA_IOMMU_GART -struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc); -int tegra_gart_suspend(struct gart_device *gart); -int tegra_gart_resume(struct gart_device *gart); -#else -static inline struct gart_device * -tegra_gart_probe(struct device *dev, struct tegra_mc *mc) -{ - return ERR_PTR(-ENODEV); -} - -static inline int tegra_gart_suspend(struct gart_device *gart) -{ - return -ENODEV; -} - -static inline int tegra_gart_resume(struct gart_device *gart) -{ - return -ENODEV; -} -#endif - struct tegra_mc_reset { const char *name; unsigned long id; @@ -185,7 +162,6 @@ struct tegra_mc_ops { */ int (*probe)(struct tegra_mc *mc); void (*remove)(struct tegra_mc *mc); - int (*suspend)(struct tegra_mc *mc); int (*resume)(struct tegra_mc *mc); irqreturn_t (*handle_irq)(int irq, void *data); int (*probe_device)(struct tegra_mc *mc, struct device *dev); @@ -225,7 +201,6 @@ struct tegra_mc { struct tegra_bpmp *bpmp; struct device *dev; struct tegra_smmu *smmu; - struct gart_device *gart; void __iomem *regs; void __iomem *bcast_ch_regs; void __iomem **ch_regs; diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h index c495c6d5fbe0..4bd3be3a3192 100644 --- a/include/sound/ac97_codec.h +++ b/include/sound/ac97_codec.h @@ -410,7 +410,7 @@ int snd_ac97_pcm_close(struct ac97_pcm *pcm); int snd_ac97_pcm_double_rate_rules(struct snd_pcm_runtime *runtime); /* ad hoc AC97 device driver access */ 
-extern struct bus_type ac97_bus_type; +extern const struct bus_type ac97_bus_type; /* AC97 platform_data adding function */ static inline void snd_ac97_dev_add_pdata(struct snd_ac97 *ac97, void *data) diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h index 1bf757901d02..68e053fe7340 100644 --- a/include/sound/cs35l41.h +++ b/include/sound/cs35l41.h @@ -11,7 +11,6 @@ #define __CS35L41_H #include <linux/regmap.h> -#include <linux/completion.h> #include <linux/firmware/cirrus/cs_dsp.h> #define CS35L41_FIRSTREG 0x00000000 @@ -736,6 +735,7 @@ #define CS35L41_REVID_B2 0xB2 #define CS35L41_HALO_CORE_RESET 0x00000200 +#define CS35L41_SOFTWARE_RESET 0x5A000000 #define CS35L41_FS1_WINDOW_MASK 0x000007FF #define CS35L41_FS2_WINDOW_MASK 0x00FFF800 @@ -816,6 +816,8 @@ struct cs35l41_otp_map_element_t { }; enum cs35l41_cspl_mbox_status { + CSPL_MBOX_STS_ERROR = U32_MAX, + CSPL_MBOX_STS_ERROR2 = 0x00ffffff, // firmware not always sign-extending 24-bit value CSPL_MBOX_STS_RUNNING = 0, CSPL_MBOX_STS_PAUSED = 1, CSPL_MBOX_STS_RDY_FOR_REINIT = 2, @@ -902,7 +904,8 @@ int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap); int cs35l41_init_boost(struct device *dev, struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg); bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type); +int cs35l41_mdsync_up(struct regmap *regmap); int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type, - int enable, struct completion *pll_lock, bool firmware_running); + int enable, struct cs_dsp *dsp); #endif /* __CS35L41_H */ diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index 3950322bf3cb..b24716ab2750 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -75,6 +75,7 @@ #define CS35L56_DSP1_AHBM_WINDOW_DEBUG_0 0x25E2040 #define CS35L56_DSP1_AHBM_WINDOW_DEBUG_1 0x25E2044 #define CS35L56_DSP1_XMEM_UNPACKED24_0 0x2800000 +#define CS35L56_DSP1_FW_VER 0x2800010 #define CS35L56_DSP1_HALO_STATE_A1 0x2801E58 #define CS35L56_DSP1_HALO_STATE 0x28021E0 #define CS35L56_DSP1_PM_CUR_STATE_A1 0x2804000 @@ -241,10 +242,9 @@ #define CS35L56_CONTROL_PORT_READY_US 2200 #define CS35L56_HALO_STATE_POLL_US 1000 -#define CS35L56_HALO_STATE_TIMEOUT_US 50000 -#define CS35L56_HIBERNATE_WAKE_POLL_US 500 -#define CS35L56_HIBERNATE_WAKE_TIMEOUT_US 5000 +#define CS35L56_HALO_STATE_TIMEOUT_US 250000 #define CS35L56_RESET_PULSE_MIN_US 1100 +#define CS35L56_WAKE_HOLD_TIME_US 1000 #define CS35L56_SDW1_PLAYBACK_PORT 1 #define CS35L56_SDW1_CAPTURE_PORT 3 @@ -273,6 +273,7 @@ extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC]; extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC]; int cs35l56_set_patch(struct cs35l56_base *cs35l56_base); +int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base); int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command); int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base); int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base); @@ -285,7 +286,10 @@ int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base); int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base); int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire); void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp); +int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base, + bool *fw_missing, unsigned int *fw_version); int cs35l56_hw_init(struct 
cs35l56_base *cs35l56_base); +int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base); int cs35l56_get_bclk_freq_id(unsigned int freq); void cs35l56_fill_supply_names(struct regulator_bulk_data *data); diff --git a/include/sound/cs4271.h b/include/sound/cs4271.h index 6ff23ab48894..5a55d135bdea 100644 --- a/include/sound/cs4271.h +++ b/include/sound/cs4271.h @@ -9,7 +9,6 @@ #define __CS4271_H struct cs4271_platform_data { - int gpio_nreset; /* GPIO driving Reset pin, if any */ bool amutec_eq_bmutec; /* flag to enable AMUTEC=BMUTEC */ /* diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index c9a8bce9a785..d70c55f17df7 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -142,7 +142,7 @@ struct snd_dmaengine_pcm_config { struct snd_pcm_substream *substream); int (*process)(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, - struct iov_iter *buf, unsigned long bytes); + unsigned long bytes); dma_filter_fn compat_filter_fn; struct device *dma_dev; const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; diff --git a/include/sound/graph_card.h b/include/sound/graph_card.h index 4c8b94c77b8e..8e2e15dfcb1e 100644 --- a/include/sound/graph_card.h +++ b/include/sound/graph_card.h @@ -9,27 +9,27 @@ #include <sound/simple_card_utils.h> -typedef int (*GRAPH2_CUSTOM)(struct asoc_simple_priv *priv, +typedef int (*GRAPH2_CUSTOM)(struct simple_util_priv *priv, struct device_node *lnk, struct link_info *li); struct graph2_custom_hooks { - int (*hook_pre)(struct asoc_simple_priv *priv); - int (*hook_post)(struct asoc_simple_priv *priv); + int (*hook_pre)(struct simple_util_priv *priv); + int (*hook_post)(struct simple_util_priv *priv); GRAPH2_CUSTOM custom_normal; GRAPH2_CUSTOM custom_dpcm; GRAPH2_CUSTOM custom_c2c; }; -int audio_graph_parse_of(struct asoc_simple_priv *priv, struct device *dev); -int audio_graph2_parse_of(struct asoc_simple_priv *priv, struct device *dev, +int audio_graph_parse_of(struct simple_util_priv *priv, struct device *dev); +int audio_graph2_parse_of(struct simple_util_priv *priv, struct device *dev, struct graph2_custom_hooks *hooks); -int audio_graph2_link_normal(struct asoc_simple_priv *priv, +int audio_graph2_link_normal(struct simple_util_priv *priv, struct device_node *lnk, struct link_info *li); -int audio_graph2_link_dpcm(struct asoc_simple_priv *priv, +int audio_graph2_link_dpcm(struct simple_util_priv *priv, struct device_node *lnk, struct link_info *li); -int audio_graph2_link_c2c(struct asoc_simple_priv *priv, +int audio_graph2_link_c2c(struct simple_util_priv *priv, struct device_node *lnk, struct link_info *li); #endif /* __GRAPH_CARD_H */ diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 5497dc9c396a..9c94ba7c183d 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -141,6 +141,7 @@ struct hda_pcm_stream { hda_nid_t nid; /* default NID to query rates/formats/bps, or set up */ u32 rates; /* supported rates */ u64 formats; /* supported formats (SNDRV_PCM_FMTBIT_) */ + u32 subformats; /* for S32_LE format, SNDRV_PCM_SUBFMTBIT_* */ unsigned int maxbps; /* supported max. 
bit per sample */ const struct snd_pcm_chmap_elem *chmap; /* chmap to override */ struct hda_pcm_ops ops; @@ -448,8 +449,8 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid, #define snd_hda_codec_cleanup_stream(codec, nid) \ __snd_hda_codec_cleanup_stream(codec, nid, 0) -#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, bpsp) \ - snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, bpsp) +#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, subfmtp, bpsp) \ + snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, subfmtp, bpsp) #define snd_hda_is_supported_format(codec, nid, fmt) \ snd_hdac_is_supported_format(&(codec)->core, nid, fmt) diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h index 9c7872c0ca79..55958711d697 100644 --- a/include/sound/hda_register.h +++ b/include/sound/hda_register.h @@ -91,6 +91,8 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; #define AZX_REG_SD_BDLPL 0x18 #define AZX_REG_SD_BDLPU 0x1c +#define AZX_SD_FIFOSIZE_MASK GENMASK(15, 0) + /* GTS registers */ #define AZX_REG_LLCH 0x14 diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 32c59053b48e..a73d7f34f4e5 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -33,7 +33,7 @@ struct hda_device_id; /* * exported bus type */ -extern struct bus_type snd_hda_bus_type; +extern const struct bus_type snd_hda_bus_type; /* * generic arrays @@ -140,13 +140,14 @@ int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid, hda_nid_t *conn_list, int max_conns); int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid, hda_nid_t *start_id); -unsigned int snd_hdac_calc_stream_format(unsigned int rate, - unsigned int channels, - snd_pcm_format_t format, - unsigned int maxbps, - unsigned short spdif_ctls); +unsigned int snd_hdac_stream_format_bits(snd_pcm_format_t format, snd_pcm_subformat_t subformat, + unsigned int maxbits); +unsigned int snd_hdac_stream_format(unsigned int channels, unsigned int bits, unsigned int rate); +unsigned int snd_hdac_spdif_stream_format(unsigned int channels, unsigned int bits, + unsigned int rate, unsigned short spdif_ctls); int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid, - u32 *ratesp, u64 *formatsp, unsigned int *bpsp); + u32 *ratesp, u64 *formatsp, u32 *subformatsp, + unsigned int *bpsp); bool snd_hdac_is_supported_format(struct hdac_device *codec, hda_nid_t nid, unsigned int format); @@ -573,7 +574,7 @@ void snd_hdac_stream_release(struct hdac_stream *azx_dev); struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus, int dir, int stream_tag); -int snd_hdac_stream_setup(struct hdac_stream *azx_dev); +int snd_hdac_stream_setup(struct hdac_stream *azx_dev, bool code_loading); void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev); int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev); int snd_hdac_stream_set_params(struct hdac_stream *azx_dev, @@ -624,6 +625,9 @@ int snd_hdac_stream_set_lpib(struct hdac_stream *azx_dev, u32 value); #define snd_hdac_stream_readb_poll(dev, reg, val, cond, delay_us, timeout_us) \ read_poll_timeout_atomic(snd_hdac_reg_readb, val, cond, delay_us, timeout_us, \ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) +#define snd_hdac_stream_readw_poll(dev, reg, val, cond, delay_us, timeout_us) \ + read_poll_timeout_atomic(snd_hdac_reg_readw, val, cond, delay_us, timeout_us, \ + false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) #define snd_hdac_stream_readl_poll(dev, reg, 
val, cond, delay_us, timeout_us) \ read_poll_timeout_atomic(snd_hdac_reg_readl, val, cond, delay_us, timeout_us, \ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 511211f4a2b6..a8bebac1e4b2 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -60,6 +60,8 @@ struct hdac_ext_stream { bool link_locked:1; bool link_prepared; + int (*host_setup)(struct hdac_stream *, bool); + struct snd_pcm_substream *link_substream; }; @@ -86,6 +88,7 @@ void snd_hdac_ext_stream_start(struct hdac_ext_stream *hext_stream); void snd_hdac_ext_stream_clear(struct hdac_ext_stream *hext_stream); void snd_hdac_ext_stream_reset(struct hdac_ext_stream *hext_stream); int snd_hdac_ext_stream_setup(struct hdac_ext_stream *hext_stream, int fmt); +int snd_hdac_ext_host_stream_setup(struct hdac_ext_stream *hext_stream, bool code_loading); struct hdac_ext_link { struct hdac_bus *bus; diff --git a/include/sound/max9768.h b/include/sound/max9768.h index 0f78b41d030e..8509ba0079b0 100644 --- a/include/sound/max9768.h +++ b/include/sound/max9768.h @@ -9,14 +9,10 @@ /** * struct max9768_pdata - optional platform specific MAX9768 configuration - * @shdn_gpio: GPIO to SHDN pin. If not valid, pin must be hardwired HIGH - * @mute_gpio: GPIO to MUTE pin. If not valid, control for mute won't be added * @flags: configuration flags, e.g. set classic PWM mode (check datasheet * regarding "filterless modulation" which is default). */ struct max9768_pdata { - int shdn_gpio; - int mute_gpio; unsigned flags; #define MAX9768_FLAG_CLASSIC_PWM (1 << 0) }; diff --git a/include/sound/opl3.h b/include/sound/opl3.h index ebf3852da9fe..052395a2f876 100644 --- a/include/sound/opl3.h +++ b/include/sound/opl3.h @@ -229,7 +229,7 @@ struct fm_operator { unsigned char attack_decay; unsigned char sustain_release; unsigned char wave_select; -} __attribute__((packed)); +} __packed; /* Instrument data */ struct fm_instrument { diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2a815373dac1..cc175c623dac 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -32,6 +32,7 @@ struct snd_pcm_hardware { unsigned int info; /* SNDRV_PCM_INFO_* */ u64 formats; /* SNDRV_PCM_FMTBIT_* */ + u32 subformats; /* for S32_LE, SNDRV_PCM_SUBFMTBIT_* */ unsigned int rates; /* SNDRV_PCM_RATE_* */ unsigned int rate_min; /* min rate */ unsigned int rate_max; /* max rate */ @@ -217,6 +218,12 @@ struct snd_pcm_ops { #define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE #endif +#define _SNDRV_PCM_SUBFMTBIT(fmt) BIT((__force int)SNDRV_PCM_SUBFORMAT_##fmt) +#define SNDRV_PCM_SUBFMTBIT_STD _SNDRV_PCM_SUBFMTBIT(STD) +#define SNDRV_PCM_SUBFMTBIT_MSBITS_MAX _SNDRV_PCM_SUBFMTBIT(MSBITS_MAX) +#define SNDRV_PCM_SUBFMTBIT_MSBITS_20 _SNDRV_PCM_SUBFMTBIT(MSBITS_20) +#define SNDRV_PCM_SUBFMTBIT_MSBITS_24 _SNDRV_PCM_SUBFMTBIT(MSBITS_24) + struct snd_pcm_file { struct snd_pcm_substream *substream; int no_compat_mmap; diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h index ba184f49f7e1..fbf35df6e5cf 100644 --- a/include/sound/pcm_params.h +++ b/include/sound/pcm_params.h @@ -362,6 +362,8 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p) return snd_pcm_format_physical_width(params_format(p)); } +int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p); + static inline void params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt) { diff --git a/include/sound/rt5682s.h b/include/sound/rt5682s.h index 
66ca0c75b914..006e6003d11c 100644 --- a/include/sound/rt5682s.h +++ b/include/sound/rt5682s.h @@ -31,6 +31,13 @@ enum rt5682s_dai_clks { RT5682S_DAI_NUM_CLKS, }; +enum { + RT5682S_LDO_1_607V, + RT5682S_LDO_1_5V, + RT5682S_LDO_1_406V, + RT5682S_LDO_1_731V, +}; + struct rt5682s_platform_data { enum rt5682s_dmic1_data_pin dmic1_data_pin; enum rt5682s_dmic1_clk_pin dmic1_clk_pin; @@ -38,6 +45,7 @@ struct rt5682s_platform_data { unsigned int dmic_clk_rate; unsigned int dmic_delay; unsigned int amic_delay; + unsigned int ldo_dacref; bool dmic_clk_driving_high; const char *dai_clk_names[RT5682S_DAI_NUM_CLKS]; diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h index d264e5463f22..2e999916dbd7 100644 --- a/include/sound/simple_card.h +++ b/include/sound/simple_card.h @@ -12,15 +12,15 @@ #include <sound/soc.h> #include <sound/simple_card_utils.h> -struct asoc_simple_card_info { +struct simple_util_info { const char *name; const char *card; const char *codec; const char *platform; unsigned int daifmt; - struct asoc_simple_dai cpu_dai; - struct asoc_simple_dai codec_dai; + struct simple_util_dai cpu_dai; + struct simple_util_dai codec_dai; }; #endif /* __SIMPLE_CARD_H */ diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index d1a95bc33c56..ad67957b7b48 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -11,18 +11,18 @@ #include <linux/clk.h> #include <sound/soc.h> -#define asoc_simple_init_hp(card, sjack, prefix) \ - asoc_simple_init_jack(card, sjack, 1, prefix, NULL) -#define asoc_simple_init_mic(card, sjack, prefix) \ - asoc_simple_init_jack(card, sjack, 0, prefix, NULL) +#define simple_util_init_hp(card, sjack, prefix) \ + simple_util_init_jack(card, sjack, 1, prefix, NULL) +#define simple_util_init_mic(card, sjack, prefix) \ + simple_util_init_jack(card, sjack, 0, prefix, NULL) -struct asoc_simple_tdm_width_map { +struct simple_util_tdm_width_map { u8 sample_bits; u8 slot_count; u16 slot_width; }; -struct asoc_simple_dai { +struct simple_util_dai { const char *name; unsigned int sysclk; int clk_direction; @@ -32,17 +32,17 @@ struct asoc_simple_dai { unsigned int rx_slot_mask; struct clk *clk; bool clk_fixed; - struct asoc_simple_tdm_width_map *tdm_width_map; + struct simple_util_tdm_width_map *tdm_width_map; int n_tdm_widths; }; -struct asoc_simple_data { +struct simple_util_data { u32 convert_rate; u32 convert_channels; const char *convert_sample_format; }; -struct asoc_simple_jack { +struct simple_util_jack { struct snd_soc_jack jack; struct snd_soc_jack_pin pin; struct snd_soc_jack_gpio gpio; @@ -54,21 +54,21 @@ struct prop_nums { int platforms; }; -struct asoc_simple_priv { +struct simple_util_priv { struct snd_soc_card snd_card; struct simple_dai_props { - struct asoc_simple_dai *cpu_dai; - struct asoc_simple_dai *codec_dai; - struct asoc_simple_data adata; + struct simple_util_dai *cpu_dai; + struct simple_util_dai *codec_dai; + struct simple_util_data adata; struct snd_soc_codec_conf *codec_conf; struct prop_nums num; unsigned int mclk_fs; } *dai_props; - struct asoc_simple_jack hp_jack; - struct asoc_simple_jack mic_jack; + struct simple_util_jack hp_jack; + struct simple_util_jack mic_jack; struct snd_soc_jack *aux_jacks; struct snd_soc_dai_link *dai_link; - struct asoc_simple_dai *dais; + struct simple_util_dai *dais; struct snd_soc_dai_link_component *dlcs; struct snd_soc_codec_conf *codec_conf; struct gpio_desc *pa_gpio; @@ -130,75 +130,78 @@ struct link_info { struct prop_nums 
num[SNDRV_MAX_LINKS]; }; -int asoc_simple_parse_daifmt(struct device *dev, +int simple_util_parse_daifmt(struct device *dev, struct device_node *node, struct device_node *codec, char *prefix, unsigned int *retfmt); -int asoc_simple_parse_tdm_width_map(struct device *dev, struct device_node *np, - struct asoc_simple_dai *dai); +int simple_util_parse_tdm_width_map(struct device *dev, struct device_node *np, + struct simple_util_dai *dai); __printf(3, 4) -int asoc_simple_set_dailink_name(struct device *dev, +int simple_util_set_dailink_name(struct device *dev, struct snd_soc_dai_link *dai_link, const char *fmt, ...); -int asoc_simple_parse_card_name(struct snd_soc_card *card, +int simple_util_parse_card_name(struct snd_soc_card *card, char *prefix); -int asoc_simple_parse_clk(struct device *dev, +int simple_util_parse_clk(struct device *dev, struct device_node *node, - struct asoc_simple_dai *simple_dai, + struct simple_util_dai *simple_dai, struct snd_soc_dai_link_component *dlc); -int asoc_simple_startup(struct snd_pcm_substream *substream); -void asoc_simple_shutdown(struct snd_pcm_substream *substream); -int asoc_simple_hw_params(struct snd_pcm_substream *substream, +int simple_util_startup(struct snd_pcm_substream *substream); +void simple_util_shutdown(struct snd_pcm_substream *substream); +int simple_util_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); -int asoc_simple_dai_init(struct snd_soc_pcm_runtime *rtd); -int asoc_simple_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, +int simple_util_dai_init(struct snd_soc_pcm_runtime *rtd); +int simple_util_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params); -#define asoc_simple_parse_tdm(np, dai) \ +#define simple_util_parse_tdm(np, dai) \ snd_soc_of_parse_tdm_slot(np, &(dai)->tx_slot_mask, \ &(dai)->rx_slot_mask, \ &(dai)->slots, \ &(dai)->slot_width); -void asoc_simple_canonicalize_platform(struct snd_soc_dai_link_component *platforms, +void simple_util_canonicalize_platform(struct snd_soc_dai_link_component *platforms, struct snd_soc_dai_link_component *cpus); -void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link_component *cpus, +void simple_util_canonicalize_cpu(struct snd_soc_dai_link_component *cpus, int is_single_links); -void asoc_simple_clean_reference(struct snd_soc_card *card); +void simple_util_clean_reference(struct snd_soc_card *card); -void asoc_simple_parse_convert(struct device_node *np, char *prefix, - struct asoc_simple_data *data); -bool asoc_simple_is_convert_required(const struct asoc_simple_data *data); +void simple_util_parse_convert(struct device_node *np, char *prefix, + struct simple_util_data *data); +bool simple_util_is_convert_required(const struct simple_util_data *data); -int asoc_simple_parse_routing(struct snd_soc_card *card, +int simple_util_parse_routing(struct snd_soc_card *card, char *prefix); -int asoc_simple_parse_widgets(struct snd_soc_card *card, +int simple_util_parse_widgets(struct snd_soc_card *card, char *prefix); -int asoc_simple_parse_pin_switches(struct snd_soc_card *card, +int simple_util_parse_pin_switches(struct snd_soc_card *card, char *prefix); -int asoc_simple_init_jack(struct snd_soc_card *card, - struct asoc_simple_jack *sjack, +int simple_util_init_jack(struct snd_soc_card *card, + struct simple_util_jack *sjack, int is_hp, char *prefix, char *pin); -int asoc_simple_init_aux_jacks(struct asoc_simple_priv *priv, +int simple_util_init_aux_jacks(struct simple_util_priv *priv, char *prefix); -int 
asoc_simple_init_priv(struct asoc_simple_priv *priv, +int simple_util_init_priv(struct simple_util_priv *priv, struct link_info *li); -int asoc_simple_remove(struct platform_device *pdev); +void simple_util_remove(struct platform_device *pdev); -int asoc_graph_card_probe(struct snd_soc_card *card); -int asoc_graph_is_ports0(struct device_node *port); -int asoc_graph_parse_dai(struct device *dev, struct device_node *ep, +int graph_util_card_probe(struct snd_soc_card *card); +int graph_util_is_ports0(struct device_node *port); +int graph_util_parse_dai(struct device *dev, struct device_node *ep, struct snd_soc_dai_link_component *dlc, int *is_single_link); +int graph_util_parse_link_direction(struct device_node *np, + bool *is_playback_only, bool *is_capture_only); + #ifdef DEBUG -static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv, +static inline void simple_util_debug_dai(struct simple_util_priv *priv, char *name, - struct asoc_simple_dai *dai) + struct simple_util_dai *dai) { struct device *dev = simple_priv_to_dev(priv); @@ -228,7 +231,7 @@ static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv, name, dai->clk_direction ? "OUT" : "IN"); } -static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) +static inline void simple_util_debug_info(struct simple_util_priv *priv) { struct snd_soc_card *card = simple_priv_to_card(priv); struct device *dev = simple_priv_to_dev(priv); @@ -241,7 +244,7 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) for (i = 0; i < card->num_links; i++) { struct simple_dai_props *props = simple_priv_to_props(priv, i); struct snd_soc_dai_link *link = simple_priv_to_link(priv, i); - struct asoc_simple_dai *dai; + struct simple_util_dai *dai; struct snd_soc_codec_conf *cnf; int j; @@ -249,10 +252,10 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) dev_dbg(dev, "cpu num = %d\n", link->num_cpus); for_each_prop_dai_cpu(props, j, dai) - asoc_simple_debug_dai(priv, "cpu", dai); + simple_util_debug_dai(priv, "cpu", dai); dev_dbg(dev, "codec num = %d\n", link->num_codecs); for_each_prop_dai_codec(props, j, dai) - asoc_simple_debug_dai(priv, "codec", dai); + simple_util_debug_dai(priv, "codec", dai); if (link->name) dev_dbg(dev, "dai name = %s\n", link->name); @@ -270,7 +273,7 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) } } #else -#define asoc_simple_debug_info(priv) +#define simple_util_debug_info(priv) #endif /* DEBUG */ #endif /* __SIMPLE_CARD_UTILS_H */ diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h index e49b97d9e3ff..845e7608ac37 100644 --- a/include/sound/soc-acpi-intel-match.h +++ b/include/sound/soc-acpi-intel-match.h @@ -32,6 +32,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[]; @@ -42,6 +43,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[]; extern struct snd_soc_acpi_mach 
snd_soc_acpi_intel_lnl_sdw_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[]; /* * generic table used for HDA codec-based platforms, possibly with diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 6d31d535e8f6..23d6d6bfb073 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -68,6 +68,10 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg) * @i2s_link_mask: I2S/TDM links enabled on the board * @num_dai_drivers: number of elements in @dai_drivers * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode + * @subsystem_vendor: optional PCI SSID vendor value + * @subsystem_device: optional PCI SSID device value + * @subsystem_id_set: true if a value has been written to + * subsystem_vendor and subsystem_device. */ struct snd_soc_acpi_mach_params { u32 acpi_ipc_irq_index; @@ -80,6 +84,9 @@ struct snd_soc_acpi_mach_params { u32 i2s_link_mask; u32 num_dai_drivers; struct snd_soc_dai_driver *dai_drivers; + unsigned short subsystem_vendor; + unsigned short subsystem_device; + bool subsystem_id_set; }; /** diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h index fc94dfb0021f..ecc02e955279 100644 --- a/include/sound/soc-card.h +++ b/include/sound/soc-card.h @@ -59,6 +59,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card, void snd_soc_card_remove_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link); +#ifdef CONFIG_PCI +static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card, + unsigned short vendor, + unsigned short device) +{ + card->pci_subsystem_vendor = vendor; + card->pci_subsystem_device = device; + card->pci_subsystem_set = true; +} + +static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card, + unsigned short *vendor, + unsigned short *device) +{ + if (!card->pci_subsystem_set) + return -ENOENT; + + *vendor = card->pci_subsystem_vendor; + *device = card->pci_subsystem_device; + + return 0; +} +#else /* !CONFIG_PCI */ +static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card, + unsigned short vendor, + unsigned short device) +{ +} + +static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card, + unsigned short *vendor, + unsigned short *device) +{ + return -ENOENT; +} +#endif /* CONFIG_PCI */ + /* device driver data */ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, void *data) @@ -78,8 +115,8 @@ struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card, struct snd_soc_pcm_runtime *rtd; for_each_card_rtds(card, rtd) { - if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name)) - return asoc_rtd_to_codec(rtd, 0); + if (!strcmp(snd_soc_rtd_to_codec(rtd, 0)->name, dai_name)) + return snd_soc_rtd_to_codec(rtd, 0); } return NULL; diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 17bea3144551..ceca69b46a82 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -139,7 +139,7 @@ struct snd_soc_component_driver { struct snd_pcm_audio_tstamp_report *audio_tstamp_report); int (*copy)(struct snd_soc_component *component, struct snd_pcm_substream *substream, int channel, - unsigned long pos, struct iov_iter *buf, + unsigned long pos, struct iov_iter *iter, unsigned long bytes); struct page *(*page)(struct snd_soc_component *component, struct snd_pcm_substream *substream, @@ -511,7 +511,7 @@ int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream, int snd_soc_pcm_component_sync_stop(struct 
snd_pcm_substream *substream); int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream, int channel, unsigned long pos, - struct iov_iter *buf, unsigned long bytes); + struct iov_iter *iter, unsigned long bytes); struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream, unsigned long offset); int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream, diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 5fcfba47d98c..adcd8719d343 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -370,6 +370,7 @@ struct snd_soc_dai_ops { /* bit field */ unsigned int no_capture_mute:1; + unsigned int mute_unmute_on_trigger:1; }; struct snd_soc_cdai_ops { diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index d2faec9a323e..667ecd4daa68 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -469,6 +469,7 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card); int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai); +int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s); /* dapm path setup */ int snd_soc_dapm_new_widgets(struct snd_soc_card *card); @@ -717,7 +718,7 @@ struct snd_soc_dapm_context { /* A list of widgets associated with an object, typically a snd_kcontrol */ struct snd_soc_dapm_widget_list { int num_widgets; - struct snd_soc_dapm_widget *widgets[]; + struct snd_soc_dapm_widget *widgets[] __counted_by(num_widgets); }; #define for_each_dapm_widgets(list, i, widget) \ diff --git a/include/sound/soc.h b/include/sound/soc.h index fa2337a3cf4c..6defc5547ff9 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -10,6 +10,7 @@ #ifndef __LINUX_SND_SOC_H #define __LINUX_SND_SOC_H +#include <linux/args.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/types.h> @@ -619,6 +620,7 @@ enum snd_soc_trigger_order { struct snd_soc_pcm_stream { const char *stream_name; u64 formats; /* SNDRV_PCM_FMTBIT_* */ + u32 subformats; /* for S32_LE format, SNDRV_PCM_SUBFMTBIT_* */ unsigned int rates; /* SNDRV_PCM_RATE_* */ unsigned int rate_min; /* min rate */ unsigned int rate_max; /* max rate */ @@ -654,8 +656,45 @@ struct snd_soc_dai_link_component { struct of_phandle_args *dai_args; }; -struct snd_soc_dai_link_codec_ch_map { - unsigned int connected_cpu_id; +/* + * [dai_link->ch_maps Image sample] + * + *------------------------- + * CPU0 <---> Codec0 + * + * ch-map[0].cpu = 0 ch-map[0].codec = 0 + * + *------------------------- + * CPU0 <---> Codec0 + * CPU1 <---> Codec1 + * CPU2 <---> Codec2 + * + * ch-map[0].cpu = 0 ch-map[0].codec = 0 + * ch-map[1].cpu = 1 ch-map[1].codec = 1 + * ch-map[2].cpu = 2 ch-map[2].codec = 2 + * + *------------------------- + * CPU0 <---> Codec0 + * CPU1 <-+-> Codec1 + * CPU2 <-/ + * + * ch-map[0].cpu = 0 ch-map[0].codec = 0 + * ch-map[1].cpu = 1 ch-map[1].codec = 1 + * ch-map[2].cpu = 2 ch-map[2].codec = 1 + * + *------------------------- + * CPU0 <---> Codec0 + * CPU1 <-+-> Codec1 + * \-> Codec2 + * + * ch-map[0].cpu = 0 ch-map[0].codec = 0 + * ch-map[1].cpu = 1 ch-map[1].codec = 1 + * ch-map[2].cpu = 1 ch-map[2].codec = 2 + * + */ +struct snd_soc_dai_link_ch_map { + unsigned int cpu; + unsigned int codec; unsigned int ch_mask; }; @@ -687,7 +726,9 @@ struct snd_soc_dai_link { struct snd_soc_dai_link_component *codecs; unsigned int num_codecs; - struct snd_soc_dai_link_codec_ch_map *codec_ch_maps; + /* num_ch_maps = max(num_cpu, num_codecs) 
*/ + struct snd_soc_dai_link_ch_map *ch_maps; + /* * You MAY specify the link's platform/PCM/DMA driver, either by * device name, or by DT/OF node, but not both. Some forms of link @@ -774,37 +815,47 @@ struct snd_soc_dai_link { #endif }; +static inline int snd_soc_link_num_ch_map(struct snd_soc_dai_link *link) { + return max(link->num_cpus, link->num_codecs); +} + static inline struct snd_soc_dai_link_component* -asoc_link_to_cpu(struct snd_soc_dai_link *link, int n) { +snd_soc_link_to_cpu(struct snd_soc_dai_link *link, int n) { return &(link)->cpus[n]; } static inline struct snd_soc_dai_link_component* -asoc_link_to_codec(struct snd_soc_dai_link *link, int n) { +snd_soc_link_to_codec(struct snd_soc_dai_link *link, int n) { return &(link)->codecs[n]; } static inline struct snd_soc_dai_link_component* -asoc_link_to_platform(struct snd_soc_dai_link *link, int n) { +snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) { return &(link)->platforms[n]; } #define for_each_link_codecs(link, i, codec) \ for ((i) = 0; \ ((i) < link->num_codecs) && \ - ((codec) = asoc_link_to_codec(link, i)); \ + ((codec) = snd_soc_link_to_codec(link, i)); \ (i)++) #define for_each_link_platforms(link, i, platform) \ for ((i) = 0; \ ((i) < link->num_platforms) && \ - ((platform) = asoc_link_to_platform(link, i)); \ + ((platform) = snd_soc_link_to_platform(link, i)); \ (i)++) #define for_each_link_cpus(link, i, cpu) \ for ((i) = 0; \ ((i) < link->num_cpus) && \ - ((cpu) = asoc_link_to_cpu(link, i)); \ + ((cpu) = snd_soc_link_to_cpu(link, i)); \ + (i)++) + +#define for_each_link_ch_maps(link, i, ch_map) \ + for ((i) = 0; \ + ((i) < snd_soc_link_num_ch_map(link) && \ + ((ch_map) = link->ch_maps + i)); \ (i)++) /* @@ -870,12 +921,8 @@ asoc_link_to_platform(struct snd_soc_dai_link *link, int n) { .platforms = platform, \ .num_platforms = ARRAY_SIZE(platform) -#define SND_SOC_DAILINK_REGx(_1, _2, _3, func, ...) func #define SND_SOC_DAILINK_REG(...) \ - SND_SOC_DAILINK_REGx(__VA_ARGS__, \ - SND_SOC_DAILINK_REG3, \ - SND_SOC_DAILINK_REG2, \ - SND_SOC_DAILINK_REG1)(__VA_ARGS__) + CONCATENATE(SND_SOC_DAILINK_REG, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) #define SND_SOC_DAILINK_DEF(name, def...) \ static struct snd_soc_dai_link_component name[] = { def } @@ -892,10 +939,10 @@ asoc_link_to_platform(struct snd_soc_dai_link *link, int n) { #define COMP_PLATFORM(_name) { .name = _name } #define COMP_AUX(_name) { .name = _name } #define COMP_CODEC_CONF(_name) { .name = _name } -#define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", } +#define COMP_DUMMY() /* see snd_soc_fill_dummy_dai() */ extern struct snd_soc_dai_link_component null_dailink_component[0]; -extern struct snd_soc_dai_link_component asoc_dummy_dlc; +extern struct snd_soc_dai_link_component snd_soc_dummy_dlc; struct snd_soc_codec_conf { @@ -932,6 +979,17 @@ struct snd_soc_card { #ifdef CONFIG_DMI char dmi_longname[80]; #endif /* CONFIG_DMI */ + +#ifdef CONFIG_PCI + /* + * PCI does not define 0 as invalid, so pci_subsystem_set indicates + * whether a value has been written to these fields. 
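Because PCI reserves no "invalid" SSID encoding, the pci_subsystem_set flag is the only reliable indicator that the vendor/device pair carries meaning. A minimal sketch of the intended usage of the new snd_soc_card_set_pci_ssid()/snd_soc_card_get_pci_ssid() helpers, assuming a hypothetical platform driver and machine driver (the function names are illustrative, not from this patch):

/* Platform side: record the board SSID on the card at probe time. */
static void example_record_ssid(struct snd_soc_card *card, struct pci_dev *pci)
{
	snd_soc_card_set_pci_ssid(card, pci->subsystem_vendor,
				  pci->subsystem_device);
}

/* Machine side: act on the SSID only if one was actually recorded. */
static void example_apply_board_quirks(struct snd_soc_card *card)
{
	unsigned short vendor, device;

	if (snd_soc_card_get_pci_ssid(card, &vendor, &device))
		return; /* -ENOENT: no SSID recorded, keep defaults */

	dev_info(card->dev, "board SSID %04x:%04x\n", vendor, device);
}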
+ */ + unsigned short pci_subsystem_vendor; + unsigned short pci_subsystem_device; + bool pci_subsystem_set; +#endif /* CONFIG_PCI */ + char topology_shortname[32]; struct device *dev; @@ -1102,8 +1160,8 @@ struct snd_soc_pcm_runtime { * dais = cpu_dai + codec_dai * see * soc_new_pcm_runtime() - * asoc_rtd_to_cpu() - * asoc_rtd_to_codec() + * snd_soc_rtd_to_cpu() + * snd_soc_rtd_to_codec() */ struct snd_soc_dai **dais; @@ -1126,13 +1184,16 @@ struct snd_soc_pcm_runtime { unsigned int pop_wait:1; unsigned int fe_compr:1; /* for Dynamic PCM */ + bool initialized; + int num_components; struct snd_soc_component *components[]; /* CPU/Codec/Platform */ }; + /* see soc_new_pcm_runtime() */ -#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n] -#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus] -#define asoc_substream_to_rtd(substream) \ +#define snd_soc_rtd_to_cpu(rtd, n) (rtd)->dais[n] +#define snd_soc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus] +#define snd_soc_substream_to_rtd(substream) \ (struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream) #define for_each_rtd_components(rtd, i, component) \ @@ -1141,17 +1202,18 @@ struct snd_soc_pcm_runtime { (i)++) #define for_each_rtd_cpu_dais(rtd, i, dai) \ for ((i) = 0; \ - ((i) < rtd->dai_link->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \ + ((i) < rtd->dai_link->num_cpus) && ((dai) = snd_soc_rtd_to_cpu(rtd, i)); \ (i)++) #define for_each_rtd_codec_dais(rtd, i, dai) \ for ((i) = 0; \ - ((i) < rtd->dai_link->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \ + ((i) < rtd->dai_link->num_codecs) && ((dai) = snd_soc_rtd_to_codec(rtd, i)); \ (i)++) #define for_each_rtd_dais(rtd, i, dai) \ for ((i) = 0; \ ((i) < (rtd)->dai_link->num_cpus + (rtd)->dai_link->num_codecs) && \ ((dai) = (rtd)->dais[i]); \ (i)++) +#define for_each_rtd_ch_maps(rtd, i, ch_maps) for_each_link_ch_maps(rtd->dai_link, i, ch_maps) void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd); diff --git a/include/sound/sof.h b/include/sound/sof.h index d3c41f87ac31..05213bb515a3 100644 --- a/include/sound/sof.h +++ b/include/sound/sof.h @@ -52,11 +52,23 @@ enum sof_dsp_power_states { /* Definitions for multiple IPCs */ enum sof_ipc_type { - SOF_IPC, - SOF_INTEL_IPC4, + SOF_IPC_TYPE_3, + SOF_IPC_TYPE_4, SOF_IPC_TYPE_COUNT }; +struct sof_loadable_file_profile { + enum sof_ipc_type ipc_type; + + const char *fw_path; + const char *fw_path_postfix; + const char *fw_name; + const char *fw_lib_path; + const char *fw_lib_path_postfix; + const char *tplg_path; + const char *tplg_name; +}; + /* * SOF Platform data. */ @@ -64,6 +76,14 @@ struct snd_sof_pdata { const char *name; const char *platform; + /* + * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set + * flag indicates that a value has been written to these members. 
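The new ipc_file_profile_base lets a platform state its preferred IPC type and path overrides in one place. A sketch of a statically defined profile; every path and file name below is a hypothetical placeholder, not taken from this patch:

static const struct sof_loadable_file_profile example_ipc4_profile = {
	.ipc_type	= SOF_IPC_TYPE_4,
	.fw_path	= "example-vendor/sof-ipc4",		/* hypothetical */
	.fw_name	= "sof-example.ri",			/* hypothetical */
	.tplg_path	= "example-vendor/sof-ipc4-tplg",
	.tplg_name	= "sof-example.tplg",
	/* members left unset stay NULL, presumably falling back to defaults */
};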
+ */ + unsigned short subsystem_vendor; + unsigned short subsystem_device; + bool subsystem_id_set; + struct device *dev; /* @@ -78,6 +98,9 @@ struct snd_sof_pdata { /* descriptor */ const struct sof_dev_desc *desc; + /* platform's preferred IPC type and path overrides */ + struct sof_loadable_file_profile ipc_file_profile_base; + /* firmware and topology filenames */ const char *fw_filename_prefix; const char *fw_filename; diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h index ca8325353d41..6bc987bd4761 100644 --- a/include/sound/sof/dai-imx.h +++ b/include/sound/sof/dai-imx.h @@ -51,4 +51,11 @@ struct sof_ipc_dai_sai_params { uint16_t tdm_slot_width; uint16_t reserved2; /* alignment */ } __packed; + +/* MICFIL Configuration Request - SOF_IPC_DAI_MICFIL_CONFIG */ +struct sof_ipc_dai_micfil_params { + uint32_t pdm_rate; + uint32_t pdm_ch; +} __packed; + #endif diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index 3041f5805b7b..4773a5f616a4 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -88,6 +88,7 @@ enum sof_ipc_dai_type { SOF_DAI_AMD_HS, /**< Amd HS */ SOF_DAI_AMD_SP_VIRTUAL, /**< AMD ACP SP VIRTUAL */ SOF_DAI_AMD_HS_VIRTUAL, /**< AMD ACP HS VIRTUAL */ + SOF_DAI_IMX_MICFIL, /** < i.MX MICFIL PDM */ }; /* general purpose DAI configuration */ @@ -117,6 +118,7 @@ struct sof_ipc_dai_config { struct sof_ipc_dai_acpdmic_params acpdmic; struct sof_ipc_dai_acp_params acphs; struct sof_ipc_dai_mtk_afe_params afe; + struct sof_ipc_dai_micfil_params micfil; }; } __packed; diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h index 78568abe2673..1eb538e18236 100644 --- a/include/sound/sof/ipc4/header.h +++ b/include/sound/sof/ipc4/header.h @@ -106,12 +106,19 @@ enum sof_ipc4_global_msg { SOF_IPC4_GLB_SAVE_PIPELINE, SOF_IPC4_GLB_RESTORE_PIPELINE, - /* Loads library (using Code Load or HD/A Host Output DMA) */ + /* + * library loading + * + * Loads library (using Code Load or HD/A Host Output DMA) + */ SOF_IPC4_GLB_LOAD_LIBRARY, + /* + * Prepare the host DMA channel for library loading, must be followed by + * a SOF_IPC4_GLB_LOAD_LIBRARY message as the library loading step + */ + SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE, - /* 25: RESERVED - do not use */ - - SOF_IPC4_GLB_INTERNAL_MESSAGE = 26, + SOF_IPC4_GLB_INTERNAL_MESSAGE, /* Notification (FW to SW driver) */ SOF_IPC4_GLB_NOTIFICATION, @@ -416,6 +423,12 @@ enum sof_ipc4_fw_config_params { SOF_IPC4_FW_CFG_RESERVED, SOF_IPC4_FW_CFG_POWER_GATING_POLICY, SOF_IPC4_FW_CFG_ASSERT_MODE, + SOF_IPC4_FW_RESERVED1, + SOF_IPC4_FW_RESERVED2, + SOF_IPC4_FW_RESERVED3, + SOF_IPC4_FW_RESERVED4, + SOF_IPC4_FW_RESERVED5, + SOF_IPC4_FW_CONTEXT_SAVE }; struct sof_ipc4_fw_version { @@ -508,6 +521,52 @@ struct sof_ipc4_notify_resource_data { uint32_t data[6]; } __packed __aligned(4); +#define SOF_IPC4_DEBUG_DESCRIPTOR_SIZE 12 /* 3 x u32 */ + +/* + * The debug memory window is divided into 16 slots, and the + * first slot is used as a recorder for the other 15 slots. 
+ */ +#define SOF_IPC4_MAX_DEBUG_SLOTS 15 +#define SOF_IPC4_DEBUG_SLOT_SIZE 0x1000 + +/* debug log slot types */ +#define SOF_IPC4_DEBUG_SLOT_UNUSED 0x00000000 +#define SOF_IPC4_DEBUG_SLOT_CRITICAL_LOG 0x54524300 /* byte 0: core ID */ +#define SOF_IPC4_DEBUG_SLOT_DEBUG_LOG 0x474f4c00 /* byte 0: core ID */ +#define SOF_IPC4_DEBUG_SLOT_GDB_STUB 0x42444700 +#define SOF_IPC4_DEBUG_SLOT_TELEMETRY 0x4c455400 +#define SOF_IPC4_DEBUG_SLOT_BROKEN 0x44414544 + +/** + * struct sof_ipc4_notify_module_data - payload for module notification + * @instance_id: instance ID of the originator module of the notification + * @module_id: module ID of the originator of the notification + * @event_id: module specific event id + * @event_data_size: Size of the @event_data (if any) in bytes + * @event_data: Optional notification data, module and notification dependent + */ +struct sof_ipc4_notify_module_data { + uint16_t instance_id; + uint16_t module_id; + uint32_t event_id; + uint32_t event_data_size; + uint8_t event_data[]; +} __packed __aligned(4); + +/* + * ALSA kcontrol change notification + * + * The event_id of struct sof_ipc4_notify_module_data is divided into two u16: + * upper u16: magic number for ALSA kcontrol types: 0xA15A + * lower u16: param_id of the control, which is the type of the control + * The event_data contains the struct sof_ipc4_control_msg_payload of the control + * which sent the notification. + */ +#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_MASK GENMASK(31, 16) +#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL 0xA15A0000 +#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK GENMASK(15, 0) + /** @}*/ #endif diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h index 906e2f327ad2..b3ca886fa28f 100644 --- a/include/sound/sof/topology.h +++ b/include/sound/sof/topology.h @@ -39,6 +39,7 @@ enum sof_comp_type { SOF_COMP_ASRC, /**< Asynchronous sample rate converter */ SOF_COMP_DCBLOCK, SOF_COMP_SMART_AMP, /**< smart amplifier component */ + SOF_COMP_MODULE_ADAPTER, /**< module adapter */ /* keep FILEREAD/FILEWRITE as the last ones */ SOF_COMP_FILEREAD = 10000, /**< host test based file IO */ SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */ @@ -59,7 +60,7 @@ struct sof_ipc_comp { /* extended data length, 0 if no extended data */ uint32_t ext_data_length; -} __packed; +} __packed __aligned(4); /* * Component Buffers @@ -68,14 +69,15 @@ struct sof_ipc_comp { /* * SOF memory capabilities, add new ones at the end */ -#define SOF_MEM_CAPS_RAM (1 << 0) -#define SOF_MEM_CAPS_ROM (1 << 1) -#define SOF_MEM_CAPS_EXT (1 << 2) /**< external */ -#define SOF_MEM_CAPS_LP (1 << 3) /**< low power */ -#define SOF_MEM_CAPS_HP (1 << 4) /**< high performance */ -#define SOF_MEM_CAPS_DMA (1 << 5) /**< DMA'able */ -#define SOF_MEM_CAPS_CACHE (1 << 6) /**< cacheable */ -#define SOF_MEM_CAPS_EXEC (1 << 7) /**< executable */ +#define SOF_MEM_CAPS_RAM BIT(0) +#define SOF_MEM_CAPS_ROM BIT(1) +#define SOF_MEM_CAPS_EXT BIT(2) /**< external */ +#define SOF_MEM_CAPS_LP BIT(3) /**< low power */ +#define SOF_MEM_CAPS_HP BIT(4) /**< high performance */ +#define SOF_MEM_CAPS_DMA BIT(5) /**< DMA'able */ +#define SOF_MEM_CAPS_CACHE BIT(6) /**< cacheable */ +#define SOF_MEM_CAPS_EXEC BIT(7) /**< executable */ +#define SOF_MEM_CAPS_L3 BIT(8) /**< L3 memory */ /* * overrun will cause ring buffer overwrite, instead of XRUN. 
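Given the mask definitions above, a driver-side decoder for the ALSA kcontrol notification is straightforward. A minimal sketch, assuming only this header plus <linux/bitfield.h> for FIELD_GET() (the function name is hypothetical):

static bool example_alsa_kctl_event(const struct sof_ipc4_notify_module_data *data,
				    u16 *param_id)
{
	/* Upper 16 bits must carry the 0xA15A magic for kcontrol events. */
	if ((data->event_id & SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_MASK) !=
	    SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL)
		return false;

	/* Lower 16 bits identify the control's param_id (its type). */
	*param_id = FIELD_GET(SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK,
			      data->event_id);
	return true;
}

The event_data that follows would then be interpreted as the struct sof_ipc4_control_msg_payload of the originating control, as the comment notes.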
@@ -87,6 +89,9 @@ struct sof_ipc_comp { */ #define SOF_BUF_UNDERRUN_PERMITTED BIT(1) +/* the UUID size in bytes, shared between FW and host */ +#define SOF_UUID_SIZE 16 + /* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */ struct sof_ipc_buffer { struct sof_ipc_comp comp; @@ -94,7 +99,7 @@ struct sof_ipc_buffer { uint32_t caps; /**< SOF_MEM_CAPS_ */ uint32_t flags; /**< SOF_BUF_ flags defined above */ uint32_t reserved; /**< reserved for future use */ -} __packed; +} __packed __aligned(4); /* generic component config data - must always be after struct sof_ipc_comp */ struct sof_ipc_comp_config { @@ -107,7 +112,7 @@ struct sof_ipc_comp_config { /* reserved for future use */ uint32_t reserved[2]; -} __packed; +} __packed __aligned(4); /* generic host component */ struct sof_ipc_comp_host { @@ -116,7 +121,7 @@ struct sof_ipc_comp_host { uint32_t direction; /**< SOF_IPC_STREAM_ */ uint32_t no_irq; /**< don't send periodic IRQ to host/DSP */ uint32_t dmac_config; /**< DMA engine specific */ -} __packed; +} __packed __aligned(4); /* generic DAI component */ struct sof_ipc_comp_dai { @@ -126,13 +131,13 @@ struct sof_ipc_comp_dai { uint32_t dai_index; /**< index of this type dai */ uint32_t type; /**< DAI type - SOF_DAI_ */ uint32_t reserved; /**< reserved */ -} __packed; +} __packed __aligned(4); /* generic mixer component */ struct sof_ipc_comp_mixer { struct sof_ipc_comp comp; struct sof_ipc_comp_config config; -} __packed; +} __packed __aligned(4); /* volume ramping types */ enum sof_volume_ramp { @@ -140,6 +145,8 @@ enum sof_volume_ramp { SOF_VOLUME_LOG, SOF_VOLUME_LINEAR_ZC, SOF_VOLUME_LOG_ZC, + SOF_VOLUME_WINDOWS_FADE, + SOF_VOLUME_WINDOWS_NO_FADE, }; /* generic volume component */ @@ -151,7 +158,7 @@ struct sof_ipc_comp_volume { uint32_t max_value; uint32_t ramp; /**< SOF_VOLUME_ */ uint32_t initial_ramp; /**< ramp space in ms */ -} __packed; +} __packed __aligned(4); /* generic SRC component */ struct sof_ipc_comp_src { @@ -161,7 +168,7 @@ struct sof_ipc_comp_src { uint32_t source_rate; /**< source rate or 0 for variable */ uint32_t sink_rate; /**< sink rate or 0 for variable */ uint32_t rate_mask; /**< SOF_RATE_ supported rates */ -} __packed; +} __packed __aligned(4); /* generic ASRC component */ struct sof_ipc_comp_asrc { @@ -187,13 +194,13 @@ struct sof_ipc_comp_asrc { /* reserved for future use */ uint32_t reserved[4]; -} __attribute__((packed)); +} __packed __aligned(4); /* generic MUX component */ struct sof_ipc_comp_mux { struct sof_ipc_comp comp; struct sof_ipc_comp_config config; -} __packed; +} __packed __aligned(4); /* generic tone generator component */ struct sof_ipc_comp_tone { @@ -208,7 +215,7 @@ struct sof_ipc_comp_tone { int32_t period; int32_t repeats; int32_t ramp_step; -} __packed; +} __packed __aligned(4); /** \brief Types of processing components */ enum sof_ipc_process_type { @@ -234,8 +241,8 @@ struct sof_ipc_comp_process { /* reserved for future use */ uint32_t reserved[7]; - uint8_t data[]; -} __packed; + unsigned char data[]; +} __packed __aligned(4); /* frees components, buffers and pipelines * SOF_IPC_TPLG_COMP_FREE, SOF_IPC_TPLG_PIPE_FREE, SOF_IPC_TPLG_BUFFER_FREE @@ -243,13 +250,13 @@ struct sof_ipc_comp_process { struct sof_ipc_free { struct sof_ipc_cmd_hdr hdr; uint32_t id; -} __packed; +} __packed __aligned(4); struct sof_ipc_comp_reply { struct sof_ipc_reply rhdr; uint32_t id; uint32_t offset; -} __packed; +} __packed __aligned(4); /* * Pipeline @@ -274,25 +281,25 @@ struct sof_ipc_pipe_new { uint32_t frames_per_sched;/**< output frames of 
pipeline, 0 is variable */ uint32_t xrun_limit_usecs; /**< report xruns greater than limit */ uint32_t time_domain; /**< scheduling time domain */ -} __packed; +} __packed __aligned(4); /* pipeline construction complete - SOF_IPC_TPLG_PIPE_COMPLETE */ struct sof_ipc_pipe_ready { struct sof_ipc_cmd_hdr hdr; uint32_t comp_id; -} __packed; +} __packed __aligned(4); struct sof_ipc_pipe_free { struct sof_ipc_cmd_hdr hdr; uint32_t comp_id; -} __packed; +} __packed __aligned(4); /* connect two components in pipeline - SOF_IPC_TPLG_COMP_CONNECT */ struct sof_ipc_pipe_comp_connect { struct sof_ipc_cmd_hdr hdr; uint32_t source_id; uint32_t sink_id; -} __packed; +} __packed __aligned(4); /* external events */ enum sof_event_types { diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h index bd1b72bf47a5..ea9af2726a53 100644 --- a/include/sound/tas2781-dsp.h +++ b/include/sound/tas2781-dsp.h @@ -77,6 +77,11 @@ struct tasdev_blk { unsigned int nr_cmds; unsigned int blk_size; unsigned int nr_subblocks; + /* Fix for an m68k compile issue: storing dev_idx as a member of the block + * avoids unnecessary time and system-resource consumption from the + * dev_idx mapping otherwise done each time block data is written to the DSP. + */ + unsigned char dev_idx; unsigned char *data; }; diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h index a6c808b22318..b00d65417c31 100644 --- a/include/sound/tas2781.h +++ b/include/sound/tas2781.h @@ -1,13 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ // -// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier +// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier // // Copyright (C) 2022 - 2023 Texas Instruments Incorporated // https://www.ti.com // -// The TAS2781 driver implements a flexible and configurable +// The TAS2563/TAS2781 driver implements a flexible and configurable // algo coefficient setting for one, two, or even multiple -// TAS2781 chips. +// TAS2563/TAS2781 chips.
// // Author: Shenghao Ding <shenghao-ding@ti.com> // Author: Kevin Lu <kevin-lu@ti.com> @@ -22,6 +22,7 @@ #define TAS2781_DRV_VER 1 #define SMARTAMP_MODULE_NAME "tas2781" #define TAS2781_GLOBAL_ADDR 0x40 +#define TAS2563_GLOBAL_ADDR 0x48 #define TASDEVICE_RATES (SNDRV_PCM_RATE_44100 |\ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |\ SNDRV_PCM_RATE_88200) @@ -59,7 +60,8 @@ #define TASDEVICE_CMD_FIELD_W 0x4 enum audio_device { - TAS2781 = 0, + TAS2563, + TAS2781, }; enum device_catlog_id { @@ -121,6 +123,8 @@ struct tasdevice_priv { bool force_fwload_status; bool playback_started; bool isacpi; + unsigned int global_addr; + int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv, const struct firmware *fmw, int offset); int (*fw_parse_program_data)(struct tasdevice_priv *tas_priv, @@ -131,6 +135,9 @@ struct tasdevice_priv { const struct firmware *fmw, int offset); int (*tasdevice_load_block)(struct tasdevice_priv *tas_priv, struct tasdev_blk *block); + + int (*save_calibration)(struct tasdevice_priv *tas_priv); + void (*apply_calibration)(struct tasdevice_priv *tas_priv); }; void tas2781_reset(struct tasdevice_priv *tas_dev); @@ -139,6 +146,8 @@ int tascodec_init(struct tasdevice_priv *tas_priv, void *codec, struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c); int tasdevice_init(struct tasdevice_priv *tas_priv); void tasdevice_remove(struct tasdevice_priv *tas_priv); +int tasdevice_save_calibration(struct tasdevice_priv *tas_priv); +void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv); int tasdevice_dev_read(struct tasdevice_priv *tas_priv, unsigned short chn, unsigned int reg, unsigned int *value); int tasdevice_dev_write(struct tasdevice_priv *tas_priv, diff --git a/include/sound/wavefront.h b/include/sound/wavefront.h index 37ed437e2123..ef6f46accf29 100644 --- a/include/sound/wavefront.h +++ b/include/sound/wavefront.h @@ -8,34 +8,6 @@ * Copyright (c) by Paul Barton-Davis <pbd@op.net> */ -#if (!defined(__GNUC__) && !defined(__GNUG__)) - - You will not be able to compile this file correctly without gcc, because - it is necessary to pack the "wavefront_alias" structure to a size - of 22 bytes, corresponding to 16-bit alignment (as would have been - the case on the original platform, MS-DOS). If this is not done, - then WavePatch-format files cannot be read/written correctly. - The method used to do this here ("__attribute__((packed)") is - completely compiler dependent. - - All other wavefront_* types end up aligned to 32 bit values and - still have the same (correct) size. - -#else - - /* However, note that as of G++ 2.7.3.2, g++ was unable to - correctly parse *type* __attribute__ tags. It will do the - right thing if we use the "packed" attribute on each struct - member, which has the same semantics anyway. - */ - -#endif /* __GNUC__ */ - -/***************************** WARNING ******************************** - PLEASE DO NOT MODIFY THIS FILE IN ANY WAY THAT AFFECTS ITS ABILITY TO - BE USED WITH EITHER C *OR* C++. - **********************************************************************/ - #ifndef NUM_MIDIKEYS #define NUM_MIDIKEYS 128 #endif /* NUM_MIDIKEYS */ @@ -44,29 +16,6 @@ #define NUM_MIDICHANNELS 16 #endif /* NUM_MIDICHANNELS */ -/* These are very useful/important. the original wavefront interface - was developed on a 16 bit system, where sizeof(int) = 2 - bytes. Defining things like this makes the code much more portable, and - easier to understand without having to toggle back and forth - between a 16-bit view of the world and a 32-bit one. 
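With TAS2563 handling folded into the same driver, per-device constants such as the global I2C address are selected from the enum, and the two new calibration callbacks are optional. A minimal sketch of both, using a hypothetical setup helper (dispatching through the pointers directly, which is presumably what the new tasdevice_save_calibration()/tasdevice_apply_calibration() wrappers do):

static int example_tas_setup(struct tasdevice_priv *priv,
			     enum audio_device dev_type)
{
	/* 0x48 for TAS2563, 0x40 for TAS2781 */
	priv->global_addr = (dev_type == TAS2563) ?
		TAS2563_GLOBAL_ADDR : TAS2781_GLOBAL_ADDR;

	/* Both calibration hooks are optional; guard each call. */
	if (priv->save_calibration) {
		int ret = priv->save_calibration(priv);

		if (ret)
			return ret;
	}
	if (priv->apply_calibration)
		priv->apply_calibration(priv);

	return 0;
}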
- */ - -#ifndef __KERNEL__ -/* keep them for compatibility */ -typedef short s16; -typedef unsigned short u16; -typedef int s32; -typedef unsigned int u32; -typedef char s8; -typedef unsigned char u8; -typedef s16 INT16; -typedef u16 UINT16; -typedef s32 INT32; -typedef u32 UINT32; -typedef s8 CHAR8; -typedef u8 UCHAR8; -#endif - /* Pseudo-commands not part of the WaveFront command set. These are used for various driver controls and direct hardware control. @@ -468,7 +417,7 @@ typedef struct wf_alias { */ u8 sixteen_bit_padding; -} __attribute__((packed)) wavefront_alias; +} __packed wavefront_alias; typedef struct wf_drum { u8 PatchNumber; diff --git a/include/sound/wm0010.h b/include/sound/wm0010.h index 13b473935ca1..14ff9056c5d0 100644 --- a/include/sound/wm0010.h +++ b/include/sound/wm0010.h @@ -11,12 +11,6 @@ #define WM0010_PDATA_H struct wm0010_pdata { - int gpio_reset; - - /* Set if there is an inverter between the GPIO controlling - * the reset signal and the device. - */ - int reset_active_high; int irq_flags; }; diff --git a/include/sound/wm1250-ev1.h b/include/sound/wm1250-ev1.h deleted file mode 100644 index d16614ebecb4..000000000000 --- a/include/sound/wm1250-ev1.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/sound/wm1250-ev1.h - Platform data for WM1250-EV1 - * - * Copyright 2011 Wolfson Microelectronics. PLC. - */ - -#ifndef __LINUX_SND_WM1250_EV1_H -#define __LINUX_SND_WM1250_EV1_H - -#define WM1250_EV1_NUM_GPIOS 5 - -#define WM1250_EV1_GPIO_CLK_ENA 0 -#define WM1250_EV1_GPIO_CLK_SEL0 1 -#define WM1250_EV1_GPIO_CLK_SEL1 2 -#define WM1250_EV1_GPIO_OSR 3 -#define WM1250_EV1_GPIO_MASTER 4 - - -struct wm1250_ev1_pdata { - int gpios[WM1250_EV1_NUM_GPIOS]; -}; - -#endif diff --git a/include/sound/wm2200.h b/include/sound/wm2200.h index 9987e6c09bdc..2e4913ee2505 100644 --- a/include/sound/wm2200.h +++ b/include/sound/wm2200.h @@ -42,8 +42,6 @@ struct wm2200_micbias { }; struct wm2200_pdata { - int reset; /** GPIO controlling /RESET, if any */ - int ldo_ena; /** GPIO controlling LODENA, if any */ int irq_flags; int gpio_defaults[4]; diff --git a/include/sound/wm5100.h b/include/sound/wm5100.h index b94badf72947..1c48090fdb2c 100644 --- a/include/sound/wm5100.h +++ b/include/sound/wm5100.h @@ -36,11 +36,7 @@ struct wm5100_jack_mode { #define WM5100_GPIO_SET 0x10000 struct wm5100_pdata { - int reset; /** GPIO controlling /RESET, if any */ - int ldo_ena; /** GPIO controlling LODENA, if any */ - int hp_pol; /** GPIO controlling headset polarity, if any */ int irq_flags; - int gpio_base; struct wm5100_jack_mode jack_modes[2]; diff --git a/include/sound/wm8996.h b/include/sound/wm8996.h index 247f9917e33d..342abeef288f 100644 --- a/include/sound/wm8996.h +++ b/include/sound/wm8996.h @@ -33,8 +33,6 @@ struct wm8996_retune_mobile_config { struct wm8996_pdata { int irq_flags; /** Set IRQ trigger flags; default active low */ - int ldo_ena; /** GPIO for LDO1; -1 for none */ - int micdet_def; /** Default MICDET_SRC/HP1FB_SRC/MICD_BIAS */ enum wm8996_inmode inl_mode; @@ -42,7 +40,6 @@ struct wm8996_pdata { u32 spkmute_seq; /** Value for register 0x802 */ - int gpio_base; u32 gpio_default[5]; int num_retune_mobile_cfgs; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 159567359bbb..97099a5e3f6c 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -108,6 +108,15 @@ #define SE_MODE_PAGE_BUF 512 #define SE_SENSE_BUF 96 +enum target_submit_type { + /* Use the fabric driver's 
default submission type */ + TARGET_FABRIC_DEFAULT_SUBMIT, + /* Submit from the calling context */ + TARGET_DIRECT_SUBMIT, + /* Defer submission to the LIO workqueue */ + TARGET_QUEUE_SUBMIT, +}; + /* struct se_hba->hba_flags */ enum hba_flags_table { HBA_FLAGS_INTERNAL_USE = 0x01, @@ -717,6 +726,7 @@ struct se_dev_attrib { u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; + u8 submit_type; struct se_device *da_dev; struct config_group da_group; }; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index b188b1e90e1e..3378ff9ee271 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -113,11 +113,20 @@ struct target_core_fabric_ops { struct configfs_attribute **tfc_tpg_nacl_param_attrs; /* - * Set this member variable to true if the SCSI transport protocol + * Set this member variable if the SCSI transport protocol * (e.g. iSCSI) requires that the Data-Out buffer is transferred in * its entirety before a command is aborted. */ - bool write_pending_must_be_called; + unsigned int write_pending_must_be_called:1; + /* + * Set this if the driver supports submitting commands to the backend + * from target_submit/target_submit_cmd. + */ + unsigned int direct_submit_supp:1; + /* + * Set this to a target_submit_type value. + */ + u8 default_submit_type; }; int target_register_template(const struct target_core_fabric_ops *fo); @@ -166,20 +175,18 @@ int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb, struct scatterlist *sgl, u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count, struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp); -void target_submit(struct se_cmd *se_cmd); +int target_submit(struct se_cmd *se_cmd); sense_reason_t transport_lookup_cmd_lun(struct se_cmd *); sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb, gfp_t gfp); sense_reason_t target_cmd_parse_cdb(struct se_cmd *); void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, unsigned char *, u64, u32, int, int, int); -void target_queue_submission(struct se_cmd *se_cmd); int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *sense, u64 unpacked_lun, void *fabric_tmr_ptr, unsigned char tm_type, gfp_t, u64, int); -int transport_handle_cdb_direct(struct se_cmd *); sense_reason_t transport_generic_new_cmd(struct se_cmd *); void target_put_cmd_and_wait(struct se_cmd *cmd); @@ -197,8 +204,6 @@ void target_stop_session(struct se_session *se_sess); void target_wait_for_sess_cmds(struct se_session *); void target_show_cmd(const char *pfx, struct se_cmd *cmd); -int core_alua_check_nonop_delay(struct se_cmd *); - int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); void core_tmr_release_req(struct se_tmr_req *); int transport_generic_handle_tmr(struct se_cmd *); diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h index 4dfa6d7f83ba..cd104a1343e2 100644 --- a/include/trace/events/9p.h +++ b/include/trace/events/9p.h @@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump, __field( void *, clnt ) __field( __u8, type ) __field( __u16, tag ) - __array( unsigned char, line, P9_PROTO_DUMP_SZ ) + __dynamic_array(unsigned char, line, + min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ)) ), TP_fast_assign( __entry->clnt = clnt; __entry->type = pdu->id; __entry->tag = pdu->tag; - memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ); + memcpy(__get_dynamic_array(line), pdu->sdata, + __get_dynamic_array_len(line)); ), - 
TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n", + TP_printk("clnt %lu %s(tag = %d)\n%*ph\n", (unsigned long)__entry->clnt, show_9p_op(__entry->type), - __entry->tag, 0, __entry->line, 16, __entry->line + 16) + __entry->tag, __get_dynamic_array_len(line), + __get_dynamic_array(line)) ); diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index e9d412d19dbb..08f2c93d6b16 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -18,97 +18,6 @@ #ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY #define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY -enum afs_call_trace { - afs_call_trace_alloc, - afs_call_trace_free, - afs_call_trace_get, - afs_call_trace_put, - afs_call_trace_wake, - afs_call_trace_work, -}; - -enum afs_server_trace { - afs_server_trace_alloc, - afs_server_trace_callback, - afs_server_trace_destroy, - afs_server_trace_free, - afs_server_trace_gc, - afs_server_trace_get_by_addr, - afs_server_trace_get_by_uuid, - afs_server_trace_get_caps, - afs_server_trace_get_install, - afs_server_trace_get_new_cbi, - afs_server_trace_get_probe, - afs_server_trace_give_up_cb, - afs_server_trace_purging, - afs_server_trace_put_call, - afs_server_trace_put_cbi, - afs_server_trace_put_find_rsq, - afs_server_trace_put_probe, - afs_server_trace_put_slist, - afs_server_trace_put_slist_isort, - afs_server_trace_put_uuid_rsq, - afs_server_trace_update, -}; - - -enum afs_volume_trace { - afs_volume_trace_alloc, - afs_volume_trace_free, - afs_volume_trace_get_alloc_sbi, - afs_volume_trace_get_cell_insert, - afs_volume_trace_get_new_op, - afs_volume_trace_get_query_alias, - afs_volume_trace_put_cell_dup, - afs_volume_trace_put_cell_root, - afs_volume_trace_put_destroy_sbi, - afs_volume_trace_put_free_fc, - afs_volume_trace_put_put_op, - afs_volume_trace_put_query_alias, - afs_volume_trace_put_validate_fc, - afs_volume_trace_remove, -}; - -enum afs_cell_trace { - afs_cell_trace_alloc, - afs_cell_trace_free, - afs_cell_trace_get_queue_dns, - afs_cell_trace_get_queue_manage, - afs_cell_trace_get_queue_new, - afs_cell_trace_get_vol, - afs_cell_trace_insert, - afs_cell_trace_manage, - afs_cell_trace_put_candidate, - afs_cell_trace_put_destroy, - afs_cell_trace_put_queue_fail, - afs_cell_trace_put_queue_work, - afs_cell_trace_put_vol, - afs_cell_trace_see_source, - afs_cell_trace_see_ws, - afs_cell_trace_unuse_alias, - afs_cell_trace_unuse_check_alias, - afs_cell_trace_unuse_delete, - afs_cell_trace_unuse_fc, - afs_cell_trace_unuse_lookup, - afs_cell_trace_unuse_mntpt, - afs_cell_trace_unuse_no_pin, - afs_cell_trace_unuse_parse, - afs_cell_trace_unuse_pin, - afs_cell_trace_unuse_probe, - afs_cell_trace_unuse_sbi, - afs_cell_trace_unuse_ws, - afs_cell_trace_use_alias, - afs_cell_trace_use_check_alias, - afs_cell_trace_use_fc, - afs_cell_trace_use_fc_alias, - afs_cell_trace_use_lookup, - afs_cell_trace_use_mntpt, - afs_cell_trace_use_pin, - afs_cell_trace_use_probe, - afs_cell_trace_use_sbi, - afs_cell_trace_wait, -}; - enum afs_fs_operation { afs_FS_FetchData = 130, /* AFS Fetch file data */ afs_FS_FetchACL = 131, /* AFS Fetch file ACL */ @@ -202,121 +111,6 @@ enum yfs_cm_operation { yfs_CB_CallBack = 64204, }; -enum afs_edit_dir_op { - afs_edit_dir_create, - afs_edit_dir_create_error, - afs_edit_dir_create_inval, - afs_edit_dir_create_nospc, - afs_edit_dir_delete, - afs_edit_dir_delete_error, - afs_edit_dir_delete_inval, - afs_edit_dir_delete_noent, -}; - -enum afs_edit_dir_reason { - afs_edit_dir_for_create, - afs_edit_dir_for_link, - afs_edit_dir_for_mkdir, - 
afs_edit_dir_for_rename_0, - afs_edit_dir_for_rename_1, - afs_edit_dir_for_rename_2, - afs_edit_dir_for_rmdir, - afs_edit_dir_for_silly_0, - afs_edit_dir_for_silly_1, - afs_edit_dir_for_symlink, - afs_edit_dir_for_unlink, -}; - -enum afs_eproto_cause { - afs_eproto_bad_status, - afs_eproto_cb_count, - afs_eproto_cb_fid_count, - afs_eproto_cellname_len, - afs_eproto_file_type, - afs_eproto_ibulkst_cb_count, - afs_eproto_ibulkst_count, - afs_eproto_motd_len, - afs_eproto_offline_msg_len, - afs_eproto_volname_len, - afs_eproto_yvl_fsendpt4_len, - afs_eproto_yvl_fsendpt6_len, - afs_eproto_yvl_fsendpt_num, - afs_eproto_yvl_fsendpt_type, - afs_eproto_yvl_vlendpt4_len, - afs_eproto_yvl_vlendpt6_len, - afs_eproto_yvl_vlendpt_type, -}; - -enum afs_io_error { - afs_io_error_cm_reply, - afs_io_error_extract, - afs_io_error_fs_probe_fail, - afs_io_error_vl_lookup_fail, - afs_io_error_vl_probe_fail, -}; - -enum afs_file_error { - afs_file_error_dir_bad_magic, - afs_file_error_dir_big, - afs_file_error_dir_missing_page, - afs_file_error_dir_name_too_long, - afs_file_error_dir_over_end, - afs_file_error_dir_small, - afs_file_error_dir_unmarked_ext, - afs_file_error_mntpt, - afs_file_error_writeback_fail, -}; - -enum afs_flock_event { - afs_flock_acquired, - afs_flock_callback_break, - afs_flock_defer_unlock, - afs_flock_extend_fail, - afs_flock_fail_other, - afs_flock_fail_perm, - afs_flock_no_lockers, - afs_flock_release_fail, - afs_flock_silly_delete, - afs_flock_timestamp, - afs_flock_try_to_lock, - afs_flock_vfs_lock, - afs_flock_vfs_locking, - afs_flock_waited, - afs_flock_waiting, - afs_flock_work_extending, - afs_flock_work_retry, - afs_flock_work_unlocking, - afs_flock_would_block, -}; - -enum afs_flock_operation { - afs_flock_op_copy_lock, - afs_flock_op_flock, - afs_flock_op_grant, - afs_flock_op_lock, - afs_flock_op_release_lock, - afs_flock_op_return_ok, - afs_flock_op_return_eagain, - afs_flock_op_return_edeadlk, - afs_flock_op_return_error, - afs_flock_op_set_lock, - afs_flock_op_unlock, - afs_flock_op_wake, -}; - -enum afs_cb_break_reason { - afs_cb_break_no_break, - afs_cb_break_no_promise, - afs_cb_break_for_callback, - afs_cb_break_for_deleted, - afs_cb_break_for_lapsed, - afs_cb_break_for_s_reinit, - afs_cb_break_for_unlink, - afs_cb_break_for_v_break, - afs_cb_break_for_volume_callback, - afs_cb_break_for_zap, -}; - #endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */ /* @@ -357,9 +151,11 @@ enum afs_cb_break_reason { EM(afs_volume_trace_alloc, "ALLOC ") \ EM(afs_volume_trace_free, "FREE ") \ EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \ + EM(afs_volume_trace_get_callback, "GET callback ") \ EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \ EM(afs_volume_trace_get_new_op, "GET op-new ") \ EM(afs_volume_trace_get_query_alias, "GET cell-alias") \ + EM(afs_volume_trace_put_callback, "PUT callback ") \ EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \ EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \ EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \ @@ -391,6 +187,7 @@ enum afs_cb_break_reason { EM(afs_cell_trace_unuse_fc, "UNU fc ") \ EM(afs_cell_trace_unuse_lookup, "UNU lookup") \ EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \ + EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \ EM(afs_cell_trace_unuse_parse, "UNU parse ") \ EM(afs_cell_trace_unuse_pin, "UNU pin ") \ EM(afs_cell_trace_unuse_probe, "UNU probe ") \ @@ -407,6 +204,40 @@ enum afs_cb_break_reason { EM(afs_cell_trace_use_sbi, "USE sbi ") \ E_(afs_cell_trace_wait, "WAIT ") +#define afs_alist_traces 
\ + EM(afs_alist_trace_alloc, "ALLOC ") \ + EM(afs_alist_trace_get_estate, "GET estate") \ + EM(afs_alist_trace_get_vlgetcaps, "GET vgtcap") \ + EM(afs_alist_trace_get_vlprobe, "GET vprobe") \ + EM(afs_alist_trace_get_vlrotate_set, "GET vl-rot") \ + EM(afs_alist_trace_put_estate, "PUT estate") \ + EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \ + EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \ + EM(afs_alist_trace_put_parse_error, "PUT p-err ") \ + EM(afs_alist_trace_put_server_dup, "PUT sv-dup") \ + EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \ + EM(afs_alist_trace_put_server_update, "PUT sv-upd") \ + EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \ + EM(afs_alist_trace_put_vlprobe, "PUT vprobe") \ + EM(afs_alist_trace_put_vlrotate_end, "PUT vr-end") \ + EM(afs_alist_trace_put_vlrotate_fail, "PUT vr-fai") \ + EM(afs_alist_trace_put_vlrotate_next, "PUT vr-nxt") \ + EM(afs_alist_trace_put_vlrotate_restart,"PUT vr-rst") \ + EM(afs_alist_trace_put_vlserver, "PUT vlsrvr") \ + EM(afs_alist_trace_put_vlserver_old, "PUT vs-old") \ + E_(afs_alist_trace_free, "FREE ") + +#define afs_estate_traces \ + EM(afs_estate_trace_alloc_probe, "ALLOC prob") \ + EM(afs_estate_trace_alloc_server, "ALLOC srvr") \ + EM(afs_estate_trace_get_server_state, "GET srv-st") \ + EM(afs_estate_trace_get_getcaps, "GET getcap") \ + EM(afs_estate_trace_put_getcaps, "PUT getcap") \ + EM(afs_estate_trace_put_probe, "PUT probe ") \ + EM(afs_estate_trace_put_server, "PUT server") \ + EM(afs_estate_trace_put_server_state, "PUT srv-st") \ + E_(afs_estate_trace_free, "FREE ") + #define afs_fs_operations \ EM(afs_FS_FetchData, "FS.FetchData") \ EM(afs_FS_FetchStatus, "FS.FetchStatus") \ @@ -604,15 +435,67 @@ enum afs_cb_break_reason { #define afs_cb_break_reasons \ EM(afs_cb_break_no_break, "no-break") \ - EM(afs_cb_break_no_promise, "no-promise") \ EM(afs_cb_break_for_callback, "break-cb") \ + EM(afs_cb_break_for_creation_regress, "creation-regress") \ EM(afs_cb_break_for_deleted, "break-del") \ - EM(afs_cb_break_for_lapsed, "break-lapsed") \ EM(afs_cb_break_for_s_reinit, "s-reinit") \ EM(afs_cb_break_for_unlink, "break-unlink") \ - EM(afs_cb_break_for_v_break, "break-v") \ + EM(afs_cb_break_for_update_regress, "update-regress") \ EM(afs_cb_break_for_volume_callback, "break-v-cb") \ - E_(afs_cb_break_for_zap, "break-zap") + EM(afs_cb_break_for_vos_release, "break-vos-release") \ + E_(afs_cb_break_volume_excluded, "vol-excluded") + +#define afs_rotate_traces \ + EM(afs_rotate_trace_aborted, "Abortd") \ + EM(afs_rotate_trace_busy_sleep, "BsySlp") \ + EM(afs_rotate_trace_check_vol_status, "VolStt") \ + EM(afs_rotate_trace_failed, "Failed") \ + EM(afs_rotate_trace_iter, "Iter ") \ + EM(afs_rotate_trace_iterate_addr, "ItAddr") \ + EM(afs_rotate_trace_next_server, "NextSv") \ + EM(afs_rotate_trace_no_more_servers, "NoMore") \ + EM(afs_rotate_trace_nomem, "Nomem ") \ + EM(afs_rotate_trace_probe_error, "PrbErr") \ + EM(afs_rotate_trace_probe_fileserver, "PrbFsv") \ + EM(afs_rotate_trace_probe_none, "PrbNon") \ + EM(afs_rotate_trace_probe_response, "PrbRsp") \ + EM(afs_rotate_trace_probe_superseded, "PrbSup") \ + EM(afs_rotate_trace_restart, "Rstart") \ + EM(afs_rotate_trace_retry_server, "RtrySv") \ + EM(afs_rotate_trace_selected_server, "SlctSv") \ + EM(afs_rotate_trace_stale_lock, "StlLck") \ + EM(afs_rotate_trace_start, "Start ") \ + EM(afs_rotate_trace_stop, "Stop ") \ + E_(afs_rotate_trace_stopped, "Stoppd") + +/* + * Generate enums for tracing information. 
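The EM()/E_() blocks above are X-macro tables: one list of entries is expanded twice, first into enum members and later into value/string pairs for __print_symbolic(). A tiny self-contained illustration of the pattern with a hypothetical two-entry table:

#define example_traces \
	EM(example_trace_alloc, "ALLOC") \
	E_(example_trace_free,  "FREE ")

/* Pass 1: turn each entry into an enum member. */
#undef EM
#undef E_
#define EM(a, b) a,
#define E_(a, b) a
enum example_trace { example_traces } __mode(byte);

/* Pass 2: turn each entry into a { value, "string" } pair, so a
 * tracepoint can print __print_symbolic(__entry->ev, example_traces).
 */
#undef EM
#undef E_
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }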
+ */ +#ifndef __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY +#define __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY + +#undef EM +#undef E_ +#define EM(a, b) a, +#define E_(a, b) a + +enum afs_alist_trace { afs_alist_traces } __mode(byte); +enum afs_call_trace { afs_call_traces } __mode(byte); +enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte); +enum afs_cell_trace { afs_cell_traces } __mode(byte); +enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte); +enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte); +enum afs_eproto_cause { afs_eproto_causes } __mode(byte); +enum afs_estate_trace { afs_estate_traces } __mode(byte); +enum afs_file_error { afs_file_errors } __mode(byte); +enum afs_flock_event { afs_flock_events } __mode(byte); +enum afs_flock_operation { afs_flock_operations } __mode(byte); +enum afs_io_error { afs_io_errors } __mode(byte); +enum afs_rotate_trace { afs_rotate_traces } __mode(byte); +enum afs_server_trace { afs_server_traces } __mode(byte); +enum afs_volume_trace { afs_volume_traces } __mode(byte); + +#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */ /* * Export enum symbols via userspace. @@ -622,21 +505,24 @@ enum afs_cb_break_reason { #define EM(a, b) TRACE_DEFINE_ENUM(a); #define E_(a, b) TRACE_DEFINE_ENUM(a); +afs_alist_traces; afs_call_traces; -afs_server_traces; +afs_cb_break_reasons; afs_cell_traces; -afs_fs_operations; -afs_vl_operations; afs_cm_operations; -yfs_cm_operations; afs_edit_dir_ops; afs_edit_dir_reasons; afs_eproto_causes; -afs_io_errors; +afs_estate_traces; afs_file_errors; -afs_flock_types; afs_flock_operations; -afs_cb_break_reasons; +afs_flock_types; +afs_fs_operations; +afs_io_errors; +afs_rotate_traces; +afs_server_traces; +afs_vl_operations; +yfs_cm_operations; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -654,12 +540,12 @@ TRACE_EVENT(afs_receive_data, TP_ARGS(call, iter, want_more, ret), TP_STRUCT__entry( - __field(loff_t, remain ) - __field(unsigned int, call ) - __field(enum afs_call_state, state ) - __field(unsigned short, unmarshall ) - __field(bool, want_more ) - __field(int, ret ) + __field(loff_t, remain) + __field(unsigned int, call) + __field(enum afs_call_state, state) + __field(unsigned short, unmarshall) + __field(bool, want_more) + __field(int, ret) ), TP_fast_assign( @@ -686,9 +572,9 @@ TRACE_EVENT(afs_notify_call, TP_ARGS(rxcall, call), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(enum afs_call_state, state ) - __field(unsigned short, unmarshall ) + __field(unsigned int, call) + __field(enum afs_call_state, state) + __field(unsigned short, unmarshall) ), TP_fast_assign( @@ -708,9 +594,9 @@ TRACE_EVENT(afs_cb_call, TP_ARGS(call), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(u32, op ) - __field(u16, service_id ) + __field(unsigned int, call) + __field(u32, op) + __field(u16, service_id) ), TP_fast_assign( @@ -733,11 +619,11 @@ TRACE_EVENT(afs_call, TP_ARGS(call_debug_id, op, ref, outstanding, where), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(int, op ) - __field(int, ref ) - __field(int, outstanding ) - __field(const void *, where ) + __field(unsigned int, call) + __field(int, op) + __field(int, ref) + __field(int, outstanding) + __field(const void *, where) ), TP_fast_assign( @@ -762,9 +648,9 @@ TRACE_EVENT(afs_make_fs_call, TP_ARGS(call, fid), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(enum afs_fs_operation, op ) - __field_struct(struct afs_fid, fid ) + __field(unsigned int, call) + __field(enum afs_fs_operation, op) + 
__field_struct(struct afs_fid, fid) ), TP_fast_assign( @@ -794,10 +680,10 @@ TRACE_EVENT(afs_make_fs_calli, TP_ARGS(call, fid, i), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(unsigned int, i ) - __field(enum afs_fs_operation, op ) - __field_struct(struct afs_fid, fid ) + __field(unsigned int, call) + __field(unsigned int, i) + __field(enum afs_fs_operation, op) + __field_struct(struct afs_fid, fid) ), TP_fast_assign( @@ -829,10 +715,10 @@ TRACE_EVENT(afs_make_fs_call1, TP_ARGS(call, fid, name), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(enum afs_fs_operation, op ) - __field_struct(struct afs_fid, fid ) - __array(char, name, 24 ) + __field(unsigned int, call) + __field(enum afs_fs_operation, op) + __field_struct(struct afs_fid, fid) + __array(char, name, 24) ), TP_fast_assign( @@ -866,11 +752,11 @@ TRACE_EVENT(afs_make_fs_call2, TP_ARGS(call, fid, name, name2), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(enum afs_fs_operation, op ) - __field_struct(struct afs_fid, fid ) - __array(char, name, 24 ) - __array(char, name2, 24 ) + __field(unsigned int, call) + __field(enum afs_fs_operation, op) + __field_struct(struct afs_fid, fid) + __array(char, name, 24) + __array(char, name2, 24) ), TP_fast_assign( @@ -907,8 +793,8 @@ TRACE_EVENT(afs_make_vl_call, TP_ARGS(call), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(enum afs_vl_operation, op ) + __field(unsigned int, call) + __field(enum afs_vl_operation, op) ), TP_fast_assign( @@ -927,10 +813,10 @@ TRACE_EVENT(afs_call_done, TP_ARGS(call), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(struct rxrpc_call *, rx_call ) - __field(int, ret ) - __field(u32, abort_code ) + __field(unsigned int, call) + __field(struct rxrpc_call *, rx_call) + __field(int, ret) + __field(u32, abort_code) ), TP_fast_assign( @@ -953,10 +839,10 @@ TRACE_EVENT(afs_send_data, TP_ARGS(call, msg), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(unsigned int, flags ) - __field(loff_t, offset ) - __field(loff_t, count ) + __field(unsigned int, call) + __field(unsigned int, flags) + __field(loff_t, offset) + __field(loff_t, count) ), TP_fast_assign( @@ -977,10 +863,10 @@ TRACE_EVENT(afs_sent_data, TP_ARGS(call, msg, ret), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(int, ret ) - __field(loff_t, offset ) - __field(loff_t, count ) + __field(unsigned int, call) + __field(int, ret) + __field(loff_t, offset) + __field(loff_t, count) ), TP_fast_assign( @@ -1001,9 +887,9 @@ TRACE_EVENT(afs_dir_check_failed, TP_ARGS(vnode, off, i_size), TP_STRUCT__entry( - __field(struct afs_vnode *, vnode ) - __field(loff_t, off ) - __field(loff_t, i_size ) + __field(struct afs_vnode *, vnode) + __field(loff_t, off) + __field(loff_t, i_size) ), TP_fast_assign( @@ -1016,37 +902,6 @@ TRACE_EVENT(afs_dir_check_failed, __entry->vnode, __entry->off, __entry->i_size) ); -TRACE_EVENT(afs_folio_dirty, - TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio), - - TP_ARGS(vnode, where, folio), - - TP_STRUCT__entry( - __field(struct afs_vnode *, vnode ) - __field(const char *, where ) - __field(pgoff_t, index ) - __field(unsigned long, from ) - __field(unsigned long, to ) - ), - - TP_fast_assign( - unsigned long priv = (unsigned long)folio_get_private(folio); - __entry->vnode = vnode; - __entry->where = where; - __entry->index = folio_index(folio); - __entry->from = afs_folio_dirty_from(folio, priv); - __entry->to = afs_folio_dirty_to(folio, priv); - __entry->to |= (afs_is_folio_dirty_mmapped(priv) ? 
- (1UL << (BITS_PER_LONG - 1)) : 0); - ), - - TP_printk("vn=%p %lx %s %lx-%lx%s", - __entry->vnode, __entry->index, __entry->where, - __entry->from, - __entry->to & ~(1UL << (BITS_PER_LONG - 1)), - __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "") - ); - TRACE_EVENT(afs_call_state, TP_PROTO(struct afs_call *call, enum afs_call_state from, @@ -1056,11 +911,11 @@ TRACE_EVENT(afs_call_state, TP_ARGS(call, from, to, ret, remote_abort), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(enum afs_call_state, from ) - __field(enum afs_call_state, to ) - __field(int, ret ) - __field(u32, abort ) + __field(unsigned int, call) + __field(enum afs_call_state, from) + __field(enum afs_call_state, to) + __field(int, ret) + __field(u32, abort) ), TP_fast_assign( @@ -1084,9 +939,9 @@ TRACE_EVENT(afs_lookup, TP_ARGS(dvnode, name, fid), TP_STRUCT__entry( - __field_struct(struct afs_fid, dfid ) - __field_struct(struct afs_fid, fid ) - __array(char, name, 24 ) + __field_struct(struct afs_fid, dfid) + __field_struct(struct afs_fid, fid) + __array(char, name, 24) ), TP_fast_assign( @@ -1116,15 +971,15 @@ TRACE_EVENT(afs_edit_dir, TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name), TP_STRUCT__entry( - __field(unsigned int, vnode ) - __field(unsigned int, unique ) - __field(enum afs_edit_dir_reason, why ) - __field(enum afs_edit_dir_op, op ) - __field(unsigned int, block ) - __field(unsigned short, slot ) - __field(unsigned int, f_vnode ) - __field(unsigned int, f_unique ) - __array(char, name, 24 ) + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(enum afs_edit_dir_reason, why) + __field(enum afs_edit_dir_op, op) + __field(unsigned int, block) + __field(unsigned short, slot) + __field(unsigned int, f_vnode) + __field(unsigned int, f_unique) + __array(char, name, 24) ), TP_fast_assign( @@ -1157,8 +1012,8 @@ TRACE_EVENT(afs_protocol_error, TP_ARGS(call, cause), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(enum afs_eproto_cause, cause ) + __field(unsigned int, call) + __field(enum afs_eproto_cause, cause) ), TP_fast_assign( @@ -1177,9 +1032,9 @@ TRACE_EVENT(afs_io_error, TP_ARGS(call, error, where), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(int, error ) - __field(enum afs_io_error, where ) + __field(unsigned int, call) + __field(int, error) + __field(enum afs_io_error, where) ), TP_fast_assign( @@ -1199,9 +1054,9 @@ TRACE_EVENT(afs_file_error, TP_ARGS(vnode, error, where), TP_STRUCT__entry( - __field_struct(struct afs_fid, fid ) - __field(int, error ) - __field(enum afs_file_error, where ) + __field_struct(struct afs_fid, fid) + __field(int, error) + __field(enum afs_file_error, where) ), TP_fast_assign( @@ -1216,15 +1071,40 @@ TRACE_EVENT(afs_file_error, __print_symbolic(__entry->where, afs_file_errors)) ); +TRACE_EVENT(afs_bulkstat_error, + TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort), + + TP_ARGS(op, fid, index, abort), + + TP_STRUCT__entry( + __field_struct(struct afs_fid, fid) + __field(unsigned int, op) + __field(unsigned int, index) + __field(s32, abort) + ), + + TP_fast_assign( + __entry->op = op->debug_id; + __entry->fid = *fid; + __entry->index = index; + __entry->abort = abort; + ), + + TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d", + __entry->op, __entry->index, + __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique, + __entry->abort) + ); + TRACE_EVENT(afs_cm_no_server, TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx), TP_ARGS(call, srx), TP_STRUCT__entry( - 
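The afs_folio_dirty event deleted just above folded an "is mmapped" flag into the top bit of the dirty-range end, then masked it back off for printing. A small userspace sketch of that bit-packing idiom (names and values invented for illustration):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define TOP_BIT (1UL << (sizeof(unsigned long) * CHAR_BIT - 1))

/* Steal the (never otherwise used) top bit of the range end. */
static unsigned long pack_end(unsigned long to, bool mmapped)
{
	return to | (mmapped ? TOP_BIT : 0);
}

int main(void)
{
	unsigned long to = pack_end(0x42, true);

	printf("%lx-%lx%s\n", 0x10UL, to & ~TOP_BIT,
	       (to & TOP_BIT) ? " M" : "");	/* prints "10-42 M" */
	return 0;
}

Packing like this keeps the trace record one word smaller, at the cost of the mask-and-test dance visible in the removed TP_printk() above.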
__field(unsigned int, call ) - __field(unsigned int, op_id ) - __field_struct(struct sockaddr_rxrpc, srx ) + __field(unsigned int, call) + __field(unsigned int, op_id) + __field_struct(struct sockaddr_rxrpc, srx) ), TP_fast_assign( @@ -1243,9 +1123,9 @@ TRACE_EVENT(afs_cm_no_server_u, TP_ARGS(call, uuid), TP_STRUCT__entry( - __field(unsigned int, call ) - __field(unsigned int, op_id ) - __field_struct(uuid_t, uuid ) + __field(unsigned int, call) + __field(unsigned int, op_id) + __field_struct(uuid_t, uuid) ), TP_fast_assign( @@ -1265,11 +1145,11 @@ TRACE_EVENT(afs_flock_ev, TP_ARGS(vnode, fl, event, error), TP_STRUCT__entry( - __field_struct(struct afs_fid, fid ) - __field(enum afs_flock_event, event ) - __field(enum afs_lock_state, state ) - __field(int, error ) - __field(unsigned int, debug_id ) + __field_struct(struct afs_fid, fid) + __field(enum afs_flock_event, event) + __field(enum afs_lock_state, state) + __field(int, error) + __field(unsigned int, debug_id) ), TP_fast_assign( @@ -1295,13 +1175,13 @@ TRACE_EVENT(afs_flock_op, TP_ARGS(vnode, fl, op), TP_STRUCT__entry( - __field_struct(struct afs_fid, fid ) - __field(loff_t, from ) - __field(loff_t, len ) - __field(enum afs_flock_operation, op ) - __field(unsigned char, type ) - __field(unsigned int, flags ) - __field(unsigned int, debug_id ) + __field_struct(struct afs_fid, fid) + __field(loff_t, from) + __field(loff_t, len) + __field(enum afs_flock_operation, op) + __field(unsigned char, type) + __field(unsigned int, flags) + __field(unsigned int, debug_id) ), TP_fast_assign( @@ -1328,7 +1208,7 @@ TRACE_EVENT(afs_reload_dir, TP_ARGS(vnode), TP_STRUCT__entry( - __field_struct(struct afs_fid, fid ) + __field_struct(struct afs_fid, fid) ), TP_fast_assign( @@ -1345,8 +1225,8 @@ TRACE_EVENT(afs_silly_rename, TP_ARGS(vnode, done), TP_STRUCT__entry( - __field_struct(struct afs_fid, fid ) - __field(bool, done ) + __field_struct(struct afs_fid, fid) + __field(bool, done) ), TP_fast_assign( @@ -1365,9 +1245,9 @@ TRACE_EVENT(afs_get_tree, TP_ARGS(cell, volume), TP_STRUCT__entry( - __field(u64, vid ) - __array(char, cell, 24 ) - __array(char, volume, 24 ) + __field(u64, vid) + __array(char, cell, 24) + __array(char, volume, 24) ), TP_fast_assign( @@ -1385,6 +1265,30 @@ TRACE_EVENT(afs_get_tree, __entry->cell, __entry->volume, __entry->vid) ); +TRACE_EVENT(afs_cb_v_break, + TP_PROTO(afs_volid_t vid, unsigned int cb_v_break, + enum afs_cb_break_reason reason), + + TP_ARGS(vid, cb_v_break, reason), + + TP_STRUCT__entry( + __field(afs_volid_t, vid) + __field(unsigned int, cb_v_break) + __field(enum afs_cb_break_reason, reason) + ), + + TP_fast_assign( + __entry->vid = vid; + __entry->cb_v_break = cb_v_break; + __entry->reason = reason; + ), + + TP_printk("%llx vb=%x %s", + __entry->vid, + __entry->cb_v_break, + __print_symbolic(__entry->reason, afs_cb_break_reasons)) + ); + TRACE_EVENT(afs_cb_break, TP_PROTO(struct afs_fid *fid, unsigned int cb_break, enum afs_cb_break_reason reason, bool skipped), @@ -1392,10 +1296,10 @@ TRACE_EVENT(afs_cb_break, TP_ARGS(fid, cb_break, reason, skipped), TP_STRUCT__entry( - __field_struct(struct afs_fid, fid ) - __field(unsigned int, cb_break ) - __field(enum afs_cb_break_reason, reason ) - __field(bool, skipped ) + __field_struct(struct afs_fid, fid) + __field(unsigned int, cb_break) + __field(enum afs_cb_break_reason, reason) + __field(bool, skipped) ), TP_fast_assign( @@ -1418,8 +1322,8 @@ TRACE_EVENT(afs_cb_miss, TP_ARGS(fid, reason), TP_STRUCT__entry( - __field_struct(struct afs_fid, fid ) - __field(enum 
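The callback events in this region (afs_cb_v_break, afs_cb_break, afs_cb_miss) all decode their reason field with __print_symbolic(..., afs_cb_break_reasons). Under the hood that is just a value-to-name lookup done at print time, roughly like this userspace approximation (the table contents here are invented; only the {mask, name} shape mirrors the kernel's trace_print_flags):

#include <stddef.h>
#include <stdio.h>

struct trace_print_flags { unsigned long mask; const char *name; };

/* Linear scan, like the kernel helper behind __print_symbolic(). */
static const char *show_symbolic(unsigned long val,
				 const struct trace_print_flags *tbl,
				 size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].mask == val)
			return tbl[i].name;
	return "(unknown)";
}

int main(void)
{
	static const struct trace_print_flags reasons[] = {
		{ 0, "DELETED " }, { 1, "LAPSED  " }, { 2, "VOL-INIT" },
	};

	puts(show_symbolic(1, reasons, 3));	/* prints "LAPSED  " */
	return 0;
}

Storing the raw enum in the ring buffer and translating only when the trace is read keeps the fast path cheap, which is why every new event here records the enum, not the string.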
afs_cb_break_reason, reason ) + __field_struct(struct afs_fid, fid) + __field(enum afs_cb_break_reason, reason) ), TP_fast_assign( @@ -1439,10 +1343,10 @@ TRACE_EVENT(afs_server, TP_ARGS(server_debug_id, ref, active, reason), TP_STRUCT__entry( - __field(unsigned int, server ) - __field(int, ref ) - __field(int, active ) - __field(int, reason ) + __field(unsigned int, server) + __field(int, ref) + __field(int, active) + __field(int, reason) ), TP_fast_assign( @@ -1465,9 +1369,9 @@ TRACE_EVENT(afs_volume, TP_ARGS(vid, ref, reason), TP_STRUCT__entry( - __field(afs_volid_t, vid ) - __field(int, ref ) - __field(enum afs_volume_trace, reason ) + __field(afs_volid_t, vid) + __field(int, ref) + __field(enum afs_volume_trace, reason) ), TP_fast_assign( @@ -1489,10 +1393,10 @@ TRACE_EVENT(afs_cell, TP_ARGS(cell_debug_id, ref, active, reason), TP_STRUCT__entry( - __field(unsigned int, cell ) - __field(int, ref ) - __field(int, active ) - __field(int, reason ) + __field(unsigned int, cell) + __field(int, ref) + __field(int, active) + __field(int, reason) ), TP_fast_assign( @@ -1509,6 +1413,199 @@ TRACE_EVENT(afs_cell, __entry->active) ); +TRACE_EVENT(afs_alist, + TP_PROTO(unsigned int alist_debug_id, int ref, enum afs_alist_trace reason), + + TP_ARGS(alist_debug_id, ref, reason), + + TP_STRUCT__entry( + __field(unsigned int, alist) + __field(int, ref) + __field(int, active) + __field(int, reason) + ), + + TP_fast_assign( + __entry->alist = alist_debug_id; + __entry->ref = ref; + __entry->reason = reason; + ), + + TP_printk("AL=%08x %s r=%d", + __entry->alist, + __print_symbolic(__entry->reason, afs_alist_traces), + __entry->ref) + ); + +TRACE_EVENT(afs_estate, + TP_PROTO(unsigned int server_debug_id, unsigned int estate_debug_id, + int ref, enum afs_estate_trace reason), + + TP_ARGS(server_debug_id, estate_debug_id, ref, reason), + + TP_STRUCT__entry( + __field(unsigned int, server) + __field(unsigned int, estate) + __field(int, ref) + __field(int, active) + __field(int, reason) + ), + + TP_fast_assign( + __entry->server = server_debug_id; + __entry->estate = estate_debug_id; + __entry->ref = ref; + __entry->reason = reason; + ), + + TP_printk("ES=%08x[%x] %s r=%d", + __entry->server, + __entry->estate, + __print_symbolic(__entry->reason, afs_estate_traces), + __entry->ref) + ); + +TRACE_EVENT(afs_fs_probe, + TP_PROTO(struct afs_server *server, bool tx, struct afs_endpoint_state *estate, + unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us), + + TP_ARGS(server, tx, estate, addr_index, error, abort_code, rtt_us), + + TP_STRUCT__entry( + __field(unsigned int, server) + __field(unsigned int, estate) + __field(bool, tx) + __field(u16, addr_index) + __field(short, error) + __field(s32, abort_code) + __field(unsigned int, rtt_us) + __field_struct(struct sockaddr_rxrpc, srx) + ), + + TP_fast_assign( + struct afs_addr_list *alist = estate->addresses; + __entry->server = server->debug_id; + __entry->estate = estate->probe_seq; + __entry->tx = tx; + __entry->addr_index = addr_index; + __entry->error = error; + __entry->abort_code = abort_code; + __entry->rtt_us = rtt_us; + memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer), + sizeof(__entry->srx)); + ), + + TP_printk("s=%08x %s pq=%x ax=%u e=%d ac=%d rtt=%d %pISpc", + __entry->server, __entry->tx ? 
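The new afs_fs_probe event copies the whole remote sockaddr into the record with __field_struct() and memcpy() rather than stashing a pointer. That is deliberate: trace records are printed long after the event fires, so pointers into live objects cannot be trusted. A plain-C illustration of the snapshot-by-value pattern (struct and helper names are stand-ins, not kernel API):

#include <string.h>
#include <sys/socket.h>

struct probe_record {
	unsigned int server;
	struct sockaddr_storage srx;	/* copied, never referenced */
};

/* Snapshot the address at event time; the live object may be gone
 * by the time the record is formatted. */
static void record_probe(struct probe_record *rec, unsigned int id,
			 const struct sockaddr_storage *live)
{
	rec->server = id;
	memcpy(&rec->srx, live, sizeof(rec->srx));
}

int main(void)
{
	struct sockaddr_storage live = { 0 };
	struct probe_record rec;

	record_probe(&rec, 0x42, &live);
	return rec.server == 0x42 ? 0 : 1;
}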
"tx" : "rx", __entry->estate, + __entry->addr_index, __entry->error, __entry->abort_code, __entry->rtt_us, + &__entry->srx.transport) + ); + +TRACE_EVENT(afs_vl_probe, + TP_PROTO(struct afs_vlserver *server, bool tx, struct afs_addr_list *alist, + unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us), + + TP_ARGS(server, tx, alist, addr_index, error, abort_code, rtt_us), + + TP_STRUCT__entry( + __field(unsigned int, server) + __field(bool, tx) + __field(unsigned short, flags) + __field(u16, addr_index) + __field(short, error) + __field(s32, abort_code) + __field(unsigned int, rtt_us) + __field_struct(struct sockaddr_rxrpc, srx) + ), + + TP_fast_assign( + __entry->server = server->debug_id; + __entry->tx = tx; + __entry->addr_index = addr_index; + __entry->error = error; + __entry->abort_code = abort_code; + __entry->rtt_us = rtt_us; + memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer), + sizeof(__entry->srx)); + ), + + TP_printk("vl=%08x %s ax=%u e=%d ac=%d rtt=%d %pISpc", + __entry->server, __entry->tx ? "tx" : "rx", __entry->addr_index, + __entry->error, __entry->abort_code, __entry->rtt_us, + &__entry->srx.transport) + ); + +TRACE_EVENT(afs_rotate, + TP_PROTO(struct afs_operation *op, enum afs_rotate_trace reason, unsigned int extra), + + TP_ARGS(op, reason, extra), + + TP_STRUCT__entry( + __field(unsigned int, op) + __field(unsigned int, flags) + __field(unsigned int, extra) + __field(unsigned short, iteration) + __field(short, server_index) + __field(short, addr_index) + __field(enum afs_rotate_trace, reason) + ), + + TP_fast_assign( + __entry->op = op->debug_id; + __entry->flags = op->flags; + __entry->iteration = op->nr_iterations; + __entry->server_index = op->server_index; + __entry->addr_index = op->addr_index; + __entry->reason = reason; + __entry->extra = extra; + ), + + TP_printk("OP=%08x it=%02x %s fl=%x sx=%d ax=%d ext=%d", + __entry->op, + __entry->iteration, + __print_symbolic(__entry->reason, afs_rotate_traces), + __entry->flags, + __entry->server_index, + __entry->addr_index, + __entry->extra) + ); + +TRACE_EVENT(afs_make_call, + TP_PROTO(struct afs_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(unsigned int, call) + __field(bool, is_vl) + __field(enum afs_fs_operation, op) + __field_struct(struct afs_fid, fid) + __field_struct(struct sockaddr_rxrpc, srx) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->op = call->operation_ID; + __entry->fid = call->fid; + memcpy(&__entry->srx, rxrpc_kernel_remote_srx(call->peer), + sizeof(__entry->srx)); + __entry->srx.srx_service = call->service_id; + __entry->is_vl = (__entry->srx.srx_service == VL_SERVICE || + __entry->srx.srx_service == YFS_VL_SERVICE); + ), + + TP_printk("c=%08x %pISpc+%u %s %llx:%llx:%x", + __entry->call, + &__entry->srx.transport, + __entry->srx.srx_service, + __entry->is_vl ? 
+ __print_symbolic(__entry->op, afs_vl_operations) : + __print_symbolic(__entry->op, afs_fs_operations), + __entry->fid.vid, + __entry->fid.vnode, + __entry->fid.unique) + ); + #endif /* _TRACE_AFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index b2db2c2f1c57..90b0222390e5 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -21,7 +21,7 @@ struct btrfs_delayed_data_ref; struct btrfs_delayed_ref_head; struct btrfs_block_group; struct btrfs_free_cluster; -struct map_lookup; +struct btrfs_chunk_map; struct extent_buffer; struct btrfs_work; struct btrfs_workqueue; @@ -265,20 +265,20 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, __print_symbolic_u64(type, \ { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \ { EXTENT_MAP_HOLE, "HOLE" }, \ - { EXTENT_MAP_INLINE, "INLINE" }, \ - { EXTENT_MAP_DELALLOC, "DELALLOC" }) + { EXTENT_MAP_INLINE, "INLINE" }) #define show_map_type(type) \ type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type) #define show_map_flags(flag) \ __print_flags(flag, "|", \ - { (1 << EXTENT_FLAG_PINNED), "PINNED" },\ - { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\ - { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\ - { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\ - { (1 << EXTENT_FLAG_FILLING), "FILLING" },\ - { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" }) + { EXTENT_FLAG_PINNED, "PINNED" },\ + { EXTENT_FLAG_COMPRESS_ZLIB, "COMPRESS_ZLIB" },\ + { EXTENT_FLAG_COMPRESS_LZO, "COMPRESS_LZO" },\ + { EXTENT_FLAG_COMPRESS_ZSTD, "COMPRESS_ZSTD" },\ + { EXTENT_FLAG_PREALLOC, "PREALLOC" },\ + { EXTENT_FLAG_LOGGING, "LOGGING" },\ + { EXTENT_FLAG_FILLING, "FILLING" }) TRACE_EVENT_CONDITION(btrfs_get_extent, @@ -297,9 +297,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, __field( u64, orig_start ) __field( u64, block_start ) __field( u64, block_len ) - __field( unsigned long, flags ) + __field( u32, flags ) __field( int, refs ) - __field( unsigned int, compress_type ) ), TP_fast_assign_btrfs(root->fs_info, @@ -312,13 +311,11 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, __entry->block_len = map->block_len; __entry->flags = map->flags; __entry->refs = refcount_read(&map->refs); - __entry->compress_type = map->compress_type; ), TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu " "orig_start=%llu block_start=%llu(%s) " - "block_len=%llu flags=%s refs=%u " - "compress_type=%u", + "block_len=%llu flags=%s refs=%u", show_root_type(__entry->root_objectid), __entry->ino, __entry->start, @@ -327,7 +324,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, show_map_type(__entry->block_start), __entry->block_len, show_map_flags(__entry->flags), - __entry->refs, __entry->compress_type) + __entry->refs) ); TRACE_EVENT(btrfs_handle_em_exist, @@ -1061,7 +1058,7 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head, DECLARE_EVENT_CLASS(btrfs__chunk, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct map_lookup *map, u64 offset, u64 size), + const struct btrfs_chunk_map *map, u64 offset, u64 size), TP_ARGS(fs_info, map, offset, size), @@ -1095,7 +1092,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk, DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct map_lookup *map, u64 offset, u64 size), + const struct btrfs_chunk_map *map, u64 offset, u64 size), TP_ARGS(fs_info, map, offset, size) ); @@ -1103,7 +1100,7 @@ DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc, DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free, TP_PROTO(const struct btrfs_fs_info *fs_info, - 
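The btrfs show_map_flags() hunk above tracks a change of convention: the old code stored bit *numbers* (so every use site needed 1 << EXTENT_FLAG_...), while the new code defines the bit *masks* directly and __print_flags() consumes them as-is. A compact illustration of the two conventions, with invented flag names:

#include <stdio.h>

/* Old style: enumerate bit positions, shift at every use site. */
enum { OLD_PINNED_BIT = 0, OLD_PREALLOC_BIT = 1 };

/* New style: define the masks themselves. */
#define NEW_PINNED	(1U << 0)
#define NEW_PREALLOC	(1U << 1)

int main(void)
{
	unsigned int flags = NEW_PINNED | NEW_PREALLOC;

	/* With masks, tests need no extra shift: */
	if (flags & NEW_PINNED)
		puts("PINNED");
	/* The old style needed the shift spelled out each time: */
	if (flags & (1U << OLD_PREALLOC_BIT))
		puts("PREALLOC");
	return 0;
}

Mask-style constants remove a whole class of "forgot the shift" bugs, which is presumably why the tracepoint table could drop all the (1 << ...) wrappers in one go.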
const struct map_lookup *map, u64 offset, u64 size), + const struct btrfs_chunk_map *map, u64 offset, u64 size), TP_ARGS(fs_info, map, offset, size) ); @@ -1561,7 +1558,6 @@ DECLARE_EVENT_CLASS(btrfs__work, __field( const void *, wq ) __field( const void *, func ) __field( const void *, ordered_func ) - __field( const void *, ordered_free ) __field( const void *, normal_work ) ), @@ -1570,14 +1566,12 @@ DECLARE_EVENT_CLASS(btrfs__work, __entry->wq = work->wq; __entry->func = work->func; __entry->ordered_func = work->ordered_func; - __entry->ordered_free = work->ordered_free; __entry->normal_work = &work->normal_work; ), - TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p " - "ordered_free=%p", + TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p", __entry->work, __entry->normal_work, __entry->wq, - __entry->func, __entry->ordered_func, __entry->ordered_free) + __entry->func, __entry->ordered_func) ); /* @@ -2102,17 +2096,12 @@ TRACE_EVENT(btrfs_set_extent_bit, __field( unsigned, set_bits) ), - TP_fast_assign_btrfs(tree->fs_info, - __entry->owner = tree->owner; - if (tree->inode) { - const struct btrfs_inode *inode = tree->inode; + TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree), + const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree); - __entry->ino = btrfs_ino(inode); - __entry->rootid = inode->root->root_key.objectid; - } else { - __entry->ino = 0; - __entry->rootid = 0; - } + __entry->owner = tree->owner; + __entry->ino = inode ? btrfs_ino(inode) : 0; + __entry->rootid = inode ? inode->root->root_key.objectid : 0; __entry->start = start; __entry->len = len; __entry->set_bits = set_bits; @@ -2140,17 +2129,12 @@ TRACE_EVENT(btrfs_clear_extent_bit, __field( unsigned, clear_bits) ), - TP_fast_assign_btrfs(tree->fs_info, - __entry->owner = tree->owner; - if (tree->inode) { - const struct btrfs_inode *inode = tree->inode; + TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree), + const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree); - __entry->ino = btrfs_ino(inode); - __entry->rootid = inode->root->root_key.objectid; - } else { - __entry->ino = 0; - __entry->rootid = 0; - } + __entry->owner = tree->owner; + __entry->ino = inode ? btrfs_ino(inode) : 0; + __entry->rootid = inode ? inode->root->root_key.objectid : 0; __entry->start = start; __entry->len = len; __entry->clear_bits = clear_bits; @@ -2179,17 +2163,12 @@ TRACE_EVENT(btrfs_convert_extent_bit, __field( unsigned, clear_bits) ), - TP_fast_assign_btrfs(tree->fs_info, - __entry->owner = tree->owner; - if (tree->inode) { - const struct btrfs_inode *inode = tree->inode; + TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree), + const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree); - __entry->ino = btrfs_ino(inode); - __entry->rootid = inode->root->root_key.objectid; - } else { - __entry->ino = 0; - __entry->rootid = 0; - } + __entry->owner = tree->owner; + __entry->ino = inode ? btrfs_ino(inode) : 0; + __entry->rootid = inode ? 
inode->root->root_key.objectid : 0; __entry->start = start; __entry->len = len; __entry->set_bits = set_bits; @@ -2497,6 +2476,82 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_write, TP_ARGS(rbio, bio, trace_info) ); +TRACE_EVENT(btrfs_insert_one_raid_extent, + + TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length, + int num_stripes), + + TP_ARGS(fs_info, logical, length, num_stripes), + + TP_STRUCT__entry_btrfs( + __field( u64, logical ) + __field( u64, length ) + __field( int, num_stripes ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->logical = logical; + __entry->length = length; + __entry->num_stripes = num_stripes; + ), + + TP_printk_btrfs("logical=%llu length=%llu num_stripes=%d", + __entry->logical, __entry->length, + __entry->num_stripes) +); + +TRACE_EVENT(btrfs_raid_extent_delete, + + TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 end, + u64 found_start, u64 found_end), + + TP_ARGS(fs_info, start, end, found_start, found_end), + + TP_STRUCT__entry_btrfs( + __field( u64, start ) + __field( u64, end ) + __field( u64, found_start ) + __field( u64, found_end ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->start = start; + __entry->end = end; + __entry->found_start = found_start; + __entry->found_end = found_end; + ), + + TP_printk_btrfs("start=%llu end=%llu found_start=%llu found_end=%llu", + __entry->start, __entry->end, __entry->found_start, + __entry->found_end) +); + +TRACE_EVENT(btrfs_get_raid_extent_offset, + + TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length, + u64 physical, u64 devid), + + TP_ARGS(fs_info, logical, length, physical, devid), + + TP_STRUCT__entry_btrfs( + __field( u64, logical ) + __field( u64, length ) + __field( u64, physical ) + __field( u64, devid ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->logical = logical; + __entry->length = length; + __entry->physical = physical; + __entry->devid = devid; + ), + + TP_printk_btrfs("logical=%llu length=%llu physical=%llu devid=%llu", + __entry->logical, __entry->length, __entry->physical, + __entry->devid) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/csd.h b/include/trace/events/csd.h index 67e9d01f80c2..58cc83b99c34 100644 --- a/include/trace/events/csd.h +++ b/include/trace/events/csd.h @@ -12,7 +12,7 @@ TRACE_EVENT(csd_queue_cpu, TP_PROTO(const unsigned int cpu, unsigned long callsite, smp_call_func_t func, - struct __call_single_data *csd), + call_single_data_t *csd), TP_ARGS(cpu, callsite, func, csd), @@ -39,7 +39,7 @@ TRACE_EVENT(csd_queue_cpu, */ DECLARE_EVENT_CLASS(csd_function, - TP_PROTO(smp_call_func_t func, struct __call_single_data *csd), + TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd), @@ -57,12 +57,12 @@ DECLARE_EVENT_CLASS(csd_function, ); DEFINE_EVENT(csd_function, csd_function_entry, - TP_PROTO(smp_call_func_t func, struct __call_single_data *csd), + TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd) ); DEFINE_EVENT(csd_function, csd_function_exit, - TP_PROTO(smp_call_func_t func, struct __call_single_data *csd), + TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd) ); diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h index c79f1d4c39af..23200aabccac 100644 --- a/include/trace/events/damon.h +++ b/include/trace/events/damon.h @@ -9,12 +9,51 @@ #include <linux/types.h> #include <linux/tracepoint.h> +TRACE_EVENT_CONDITION(damos_before_apply, + + TP_PROTO(unsigned int 
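The csd.h hunk a little further up swaps struct __call_single_data for call_single_data_t in the tracepoint prototypes. In current kernels that typedef is, to my understanding, the same struct carrying an alignment attribute sized to the struct itself, so one csd never straddles two cache lines; the tracepoints now use the same spelling as the rest of the smp code. A self-contained sketch of a self-aligned typedef (demo struct, not the kernel's):

#include <stdio.h>

struct demo_csd {
	struct demo_csd *next;
	void (*func)(void *);
	void *info;
	unsigned long flags;
};	/* 32 bytes on LP64: a power of two, so it can self-align */

/* Aligning to the struct's own size keeps each element within a
 * single cache line instead of spanning two. */
typedef struct demo_csd demo_csd_t
	__attribute__((aligned(sizeof(struct demo_csd))));

int main(void)
{
	printf("size=%zu align=%zu\n",
	       sizeof(demo_csd_t), _Alignof(demo_csd_t));
	return 0;
}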
context_idx, unsigned int scheme_idx, + unsigned int target_idx, struct damon_region *r, + unsigned int nr_regions, bool do_trace), + + TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace), + + TP_CONDITION(do_trace), + + TP_STRUCT__entry( + __field(unsigned int, context_idx) + __field(unsigned int, scheme_idx) + __field(unsigned long, target_idx) + __field(unsigned long, start) + __field(unsigned long, end) + __field(unsigned int, nr_accesses) + __field(unsigned int, age) + __field(unsigned int, nr_regions) + ), + + TP_fast_assign( + __entry->context_idx = context_idx; + __entry->scheme_idx = scheme_idx; + __entry->target_idx = target_idx; + __entry->start = r->ar.start; + __entry->end = r->ar.end; + __entry->nr_accesses = r->nr_accesses_bp / 10000; + __entry->age = r->age; + __entry->nr_regions = nr_regions; + ), + + TP_printk("ctx_idx=%u scheme_idx=%u target_idx=%lu nr_regions=%u %lu-%lu: %u %u", + __entry->context_idx, __entry->scheme_idx, + __entry->target_idx, __entry->nr_regions, + __entry->start, __entry->end, + __entry->nr_accesses, __entry->age) +); + TRACE_EVENT(damon_aggregated, - TP_PROTO(struct damon_target *t, unsigned int target_id, - struct damon_region *r, unsigned int nr_regions), + TP_PROTO(unsigned int target_id, struct damon_region *r, + unsigned int nr_regions), - TP_ARGS(t, target_id, r, nr_regions), + TP_ARGS(target_id, r, nr_regions), TP_STRUCT__entry( __field(unsigned long, target_id) diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 65029dfb92fb..a697f4b77162 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -772,15 +772,14 @@ TRACE_EVENT(ext4_mb_release_group_pa, ); TRACE_EVENT(ext4_discard_preallocations, - TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed), + TP_PROTO(struct inode *inode, unsigned int len), - TP_ARGS(inode, len, needed), + TP_ARGS(inode, len), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( unsigned int, len ) - __field( unsigned int, needed ) ), @@ -788,13 +787,11 @@ TRACE_EVENT(ext4_discard_preallocations, __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->len = len; - __entry->needed = needed; ), - TP_printk("dev %d,%d ino %lu len: %u needed %u", + TP_printk("dev %d,%d ino %lu len: %u", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, __entry->len, - __entry->needed) + (unsigned long) __entry->ino, __entry->len) ); TRACE_EVENT(ext4_mb_discard_preallocations, diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 793f82cc1515..7ed0fc430dc6 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -161,6 +161,19 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE); { EX_READ, "Read" }, \ { EX_BLOCK_AGE, "Block Age" }) +#define show_inode_type(x) \ + __print_symbolic(x, \ + { S_IFLNK, "symbolic" }, \ + { S_IFREG, "regular" }, \ + { S_IFDIR, "directory" }, \ + { S_IFCHR, "character" }, \ + { S_IFBLK, "block" }, \ + { S_IFIFO, "fifo" }, \ + { S_IFSOCK, "sock" }) + +#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \ + S_IRWXU | S_IRWXG | S_IRWXO) + struct f2fs_sb_info; struct f2fs_io_info; struct extent_info; @@ -215,17 +228,21 @@ DECLARE_EVENT_CLASS(f2fs__inode_exit, TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) + __field(umode_t, mode) __field(int, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; __entry->ret = ret; ), - TP_printk("dev = (%d,%d), ino = %lu, ret = %d", + 
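The new f2fs show_inode_type() and S_ALL_PERM macros above split i_mode into the S_IFMT file-type bits (printed symbolically) and the permission bits (printed in octal). The same decode in userspace, using the identical <sys/stat.h> bit definitions:

#include <stdio.h>
#include <sys/stat.h>

static const char *type_name(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFLNK:	return "symbolic";
	case S_IFREG:	return "regular";
	case S_IFDIR:	return "directory";
	case S_IFCHR:	return "character";
	case S_IFBLK:	return "block";
	case S_IFIFO:	return "fifo";
	case S_IFSOCK:	return "sock";
	default:	return "unknown";
	}
}

int main(void)
{
	mode_t mode = S_IFREG | 0644;
	unsigned int perm = mode & (S_ISUID | S_ISGID | S_ISVTX |
				    S_IRWXU | S_IRWXG | S_IRWXO);

	printf("type: %s, mode = 0%o\n", type_name(mode), perm);
	/* prints: type: regular, mode = 0644 */
	return 0;
}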
TP_printk("dev = (%d,%d), ino = %lu, type: %s, mode = 0%o, ret = %d", show_dev_ino(__entry), + show_inode_type(__entry->mode & S_IFMT), + __entry->mode & S_ALL_PERM, __entry->ret) ); @@ -866,6 +883,75 @@ TRACE_EVENT(f2fs_lookup_end, __entry->err) ); +TRACE_EVENT(f2fs_rename_start, + + TP_PROTO(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags), + + TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, flags), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __string(old_name, old_dentry->d_name.name) + __field(ino_t, new_pino) + __string(new_name, new_dentry->d_name.name) + __field(unsigned int, flags) + ), + + TP_fast_assign( + __entry->dev = old_dir->i_sb->s_dev; + __entry->ino = old_dir->i_ino; + __assign_str(old_name, old_dentry->d_name.name); + __entry->new_pino = new_dir->i_ino; + __assign_str(new_name, new_dentry->d_name.name); + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), old_dir = %lu, old_name: %s, " + "new_dir = %lu, new_name: %s, flags = %u", + show_dev_ino(__entry), + __get_str(old_name), + __entry->new_pino, + __get_str(new_name), + __entry->flags) +); + +TRACE_EVENT(f2fs_rename_end, + + TP_PROTO(struct dentry *old_dentry, struct dentry *new_dentry, + unsigned int flags, int ret), + + TP_ARGS(old_dentry, new_dentry, flags, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __string(old_name, old_dentry->d_name.name) + __string(new_name, new_dentry->d_name.name) + __field(unsigned int, flags) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = old_dentry->d_sb->s_dev; + __entry->ino = old_dentry->d_inode->i_ino; + __assign_str(old_name, old_dentry->d_name.name); + __assign_str(new_name, new_dentry->d_name.name); + __entry->flags = flags; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, old_name: %s, " + "new_name: %s, flags = %u, ret = %d", + show_dev_ino(__entry), + __get_str(old_name), + __get_str(new_name), + __entry->flags, + __entry->ret) +); + TRACE_EVENT(f2fs_readdir, TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err), @@ -1283,13 +1369,6 @@ DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty, TP_ARGS(page, type) ); -DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite, - - TP_PROTO(struct page *page, int type), - - TP_ARGS(page, type) -); - TRACE_EVENT(f2fs_replace_atomic_write_block, TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index, @@ -1327,30 +1406,50 @@ TRACE_EVENT(f2fs_replace_atomic_write_block, __entry->recovery) ); -TRACE_EVENT(f2fs_filemap_fault, +DECLARE_EVENT_CLASS(f2fs_mmap, - TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret), + TP_PROTO(struct inode *inode, pgoff_t index, + vm_flags_t flags, vm_fault_t ret), - TP_ARGS(inode, index, ret), + TP_ARGS(inode, index, flags, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(pgoff_t, index) - __field(unsigned long, ret) + __field(vm_flags_t, flags) + __field(vm_fault_t, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->index = index; + __entry->flags = flags; __entry->ret = ret; ), - TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx", + TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s", show_dev_ino(__entry), (unsigned long)__entry->index, - __entry->ret) + __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE), + __print_flags(__entry->ret, "|", VM_FAULT_RESULT_TRACE)) +); + +DEFINE_EVENT(f2fs_mmap, f2fs_filemap_fault, + + 
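The f2fs_rename_start/f2fs_rename_end events added above store the dentry names with __string()/__assign_str(), which reserve variable-length space in the trace record instead of a fixed char array. A crude userspace analogue of that layout, with a fixed header and the strings appended at the record tail (names and sizes are illustrative, and the sketch assumes both names fit in data[]):

#include <stdio.h>
#include <string.h>

struct rename_rec {
	unsigned long dir_ino;
	unsigned short old_off;		/* offsets into data[] */
	unsigned short new_off;
	char data[64];
};

static void record_rename(struct rename_rec *r, unsigned long dir_ino,
			  const char *old_name, const char *new_name)
{
	r->dir_ino = dir_ino;
	r->old_off = 0;
	strcpy(r->data + r->old_off, old_name);
	r->new_off = r->old_off + strlen(old_name) + 1;
	strcpy(r->data + r->new_off, new_name);
}

int main(void)
{
	struct rename_rec r;

	record_rename(&r, 2, "a.txt", "b.txt");
	printf("dir = %lu, old_name: %s, new_name: %s\n",
	       r.dir_ino, r.data + r.old_off, r.data + r.new_off);
	return 0;
}

Paying only for the bytes actually used matters in a ring buffer, which is why the events above prefer __string() over the fixed __array(char, name, 24) used by the older afs lookup events.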
TP_PROTO(struct inode *inode, pgoff_t index, + vm_flags_t flags, vm_fault_t ret), + + TP_ARGS(inode, index, flags, ret) +); + +DEFINE_EVENT(f2fs_mmap, f2fs_vm_page_mkwrite, + + TP_PROTO(struct inode *inode, pgoff_t index, + vm_flags_t flags, vm_fault_t ret), + + TP_ARGS(inode, index, flags, ret) ); TRACE_EVENT(f2fs_writepages, diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h index 951643e6a7a9..a78d21fa9f29 100644 --- a/include/trace/events/habanalabs.h +++ b/include/trace/events/habanalabs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright 2016-2021 HabanaLabs, Ltd. + * Copyright 2022-2023 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -87,6 +87,49 @@ DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_free, TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller), TP_ARGS(dev, cpu_addr, dma_addr, size, caller)); +DECLARE_EVENT_CLASS(habanalabs_dma_map_template, + TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len, + enum dma_data_direction dir, const char *caller), + + TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller), + + TP_STRUCT__entry( + __string(dname, dev_name(dev)) + __field(u64, phys_addr) + __field(u64, dma_addr) + __field(u32, len) + __field(int, dir) + __field(const char *, caller) + ), + + TP_fast_assign( + __assign_str(dname, dev_name(dev)); + __entry->phys_addr = phys_addr; + __entry->dma_addr = dma_addr; + __entry->len = len; + __entry->dir = dir; + __entry->caller = caller; + ), + + TP_printk("%s: phys_addr: %#llx, dma_addr: %#llx, len: %#x, dir: %d, caller: %s", + __get_str(dname), + __entry->phys_addr, + __entry->dma_addr, + __entry->len, + __entry->dir, + __entry->caller) +); + +DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_map_page, + TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len, + enum dma_data_direction dir, const char *caller), + TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller)); + +DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_unmap_page, + TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len, + enum dma_data_direction dir, const char *caller), + TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller)); + DECLARE_EVENT_CLASS(habanalabs_comms_template, TP_PROTO(struct device *dev, char *op_str), diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index d7353024016c..af0af3f1d9b7 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -10,25 +10,25 @@ TRACE_EVENT(ifs_status, - TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status), + TP_PROTO(int cpu, int start, int stop, u64 status), - TP_ARGS(cpu, activate, status), + TP_ARGS(cpu, start, stop, status), TP_STRUCT__entry( __field( u64, status ) __field( int, cpu ) - __field( u8, start ) - __field( u8, stop ) + __field( u16, start ) + __field( u16, stop ) ), TP_fast_assign( __entry->cpu = cpu; - __entry->start = activate.start; - __entry->stop = activate.stop; - __entry->status = status.data; + __entry->start = start; + __entry->stop = stop; + __entry->status = status; ), - TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx", + TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx", __entry->cpu, __entry->start, __entry->stop, diff --git a/include/trace/events/ksm.h b/include/trace/events/ksm.h index b5ac35c1d0e8..e728647b5d26 100644 --- a/include/trace/events/ksm.h +++ b/include/trace/events/ksm.h @@ -245,6 +245,39 @@ TRACE_EVENT(ksm_remove_rmap_item, 
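The f2fs hunk above folds two near-identical events, f2fs_filemap_fault and f2fs_vm_page_mkwrite, into one DECLARE_EVENT_CLASS plus two DEFINE_EVENT stubs (the habanalabs dma_map template below does the same). The payoff is that the record layout and formatting code are emitted once and shared. The equivalent dedup in plain C, purely as an illustration:

#include <stdio.h>

/* "Event class": the shared record/format logic. */
static void mmap_event(const char *name, unsigned long ino,
		       unsigned long index, unsigned int flags, int ret)
{
	printf("%s: ino=%lu index=%lu flags=%x ret=%d\n",
	       name, ino, index, flags, ret);
}

/* "DEFINE_EVENT": thin named entry points sharing the class body. */
static void trace_filemap_fault(unsigned long ino, unsigned long index,
				unsigned int flags, int ret)
{
	mmap_event("filemap_fault", ino, index, flags, ret);
}

static void trace_vm_page_mkwrite(unsigned long ino, unsigned long index,
				  unsigned int flags, int ret)
{
	mmap_event("vm_page_mkwrite", ino, index, flags, ret);
}

int main(void)
{
	trace_filemap_fault(42, 7, 0x1, 0);
	trace_vm_page_mkwrite(42, 7, 0x5, 0);
	return 0;
}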
__entry->pfn, __entry->rmap_item, __entry->mm) ); +/** + * ksm_advisor - called after the advisor has run + * + * @scan_time: scan time in seconds + * @pages_to_scan: new pages_to_scan value + * @cpu_percent: cpu usage in percent + * + * Allows to trace the ksm advisor. + */ +TRACE_EVENT(ksm_advisor, + + TP_PROTO(s64 scan_time, unsigned long pages_to_scan, + unsigned int cpu_percent), + + TP_ARGS(scan_time, pages_to_scan, cpu_percent), + + TP_STRUCT__entry( + __field(s64, scan_time) + __field(unsigned long, pages_to_scan) + __field(unsigned int, cpu_percent) + ), + + TP_fast_assign( + __entry->scan_time = scan_time; + __entry->pages_to_scan = pages_to_scan; + __entry->cpu_percent = cpu_percent; + ), + + TP_printk("ksm scan time %lld pages_to_scan %lu cpu percent %u", + __entry->scan_time, __entry->pages_to_scan, + __entry->cpu_percent) +); + #endif /* _TRACE_KSM_H */ /* This part must be outside protection */ diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 3bd31ea23fee..011fba6b5552 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -62,7 +62,7 @@ TRACE_EVENT(kvm_vcpu_wakeup, __entry->valid ? "valid" : "invalid") ); -#if defined(CONFIG_HAVE_KVM_IRQFD) +#if defined(CONFIG_HAVE_KVM_IRQCHIP) TRACE_EVENT(kvm_set_irq, TP_PROTO(unsigned int gsi, int level, int irq_source_id), TP_ARGS(gsi, level, irq_source_id), @@ -82,7 +82,7 @@ TRACE_EVENT(kvm_set_irq, TP_printk("gsi %u level %d source %d", __entry->gsi, __entry->level, __entry->irq_source_id) ); -#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */ +#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */ #if defined(__KVM_HAVE_IOAPIC) #define kvm_deliver_mode \ @@ -170,7 +170,7 @@ TRACE_EVENT(kvm_msi_set_irq, #endif /* defined(__KVM_HAVE_IOAPIC) */ -#if defined(CONFIG_HAVE_KVM_IRQFD) +#if defined(CONFIG_HAVE_KVM_IRQCHIP) #ifdef kvm_irqchips #define kvm_ack_irq_string "irqchip %s pin %u" @@ -197,7 +197,7 @@ TRACE_EVENT(kvm_ack_irq, TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm) ); -#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */ +#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */ diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 061b5128f335..0190ef725b43 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -49,10 +49,11 @@ TRACE_EVENT(mm_migrate_pages, TP_PROTO(unsigned long succeeded, unsigned long failed, unsigned long thp_succeeded, unsigned long thp_failed, - unsigned long thp_split, enum migrate_mode mode, int reason), + unsigned long thp_split, unsigned long large_folio_split, + enum migrate_mode mode, int reason), TP_ARGS(succeeded, failed, thp_succeeded, thp_failed, - thp_split, mode, reason), + thp_split, large_folio_split, mode, reason), TP_STRUCT__entry( __field( unsigned long, succeeded) @@ -60,26 +61,29 @@ TRACE_EVENT(mm_migrate_pages, __field( unsigned long, thp_succeeded) __field( unsigned long, thp_failed) __field( unsigned long, thp_split) + __field( unsigned long, large_folio_split) __field( enum migrate_mode, mode) __field( int, reason) ), TP_fast_assign( - __entry->succeeded = succeeded; - __entry->failed = failed; - __entry->thp_succeeded = thp_succeeded; - __entry->thp_failed = thp_failed; - __entry->thp_split = thp_split; - __entry->mode = mode; - __entry->reason = reason; + __entry->succeeded = succeeded; + __entry->failed = failed; + __entry->thp_succeeded = thp_succeeded; + __entry->thp_failed = thp_failed; + __entry->thp_split = thp_split; + __entry->large_folio_split = large_folio_split; + __entry->mode = mode; + __entry->reason 
= reason; ), - TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s", + TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu nr_split=%lu mode=%s reason=%s", __entry->succeeded, __entry->failed, __entry->thp_succeeded, __entry->thp_failed, __entry->thp_split, + __entry->large_folio_split, __print_symbolic(__entry->mode, MIGRATE_MODE), __print_symbolic(__entry->reason, MIGRATE_REASON)) ); diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 1478b9dd05fa..d801409b33cf 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -144,7 +144,7 @@ IF_HAVE_PG_ARCH_X(arch_3) #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } #elif defined(CONFIG_PPC) #define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } -#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64) +#elif defined(CONFIG_PARISC) #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } #elif !defined(CONFIG_MMU) #define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" } diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h index 563e48617374..09e72215b9f9 100644 --- a/include/trace/events/mptcp.h +++ b/include/trace/events/mptcp.h @@ -44,7 +44,7 @@ TRACE_EVENT(mptcp_subflow_get_send, ssk = mptcp_subflow_tcp_sock(subflow); if (ssk && sk_fullsock(ssk)) { __entry->snd_wnd = tcp_sk(ssk)->snd_wnd; - __entry->pace = ssk->sk_pacing_rate; + __entry->pace = READ_ONCE(ssk->sk_pacing_rate); } else { __entry->snd_wnd = 0; __entry->pace = 0; diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h index 5eaa1fa99171..833143d0992e 100644 --- a/include/trace/events/neigh.h +++ b/include/trace/events/neigh.h @@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create, ), TP_fast_assign( - struct in6_addr *pin6; __be32 *p32; __entry->family = tbl->family; @@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create, __entry->entries = atomic_read(&tbl->gc_entries); __entry->created = n != NULL; __entry->gc_exempt = exempt_from_gc; - pin6 = (struct in6_addr *)__entry->primary_key6; p32 = (__be32 *)__entry->primary_key4; if (tbl->family == AF_INET) @@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create, #if IS_ENABLED(CONFIG_IPV6) if (tbl->family == AF_INET6) { + struct in6_addr *pin6; + pin6 = (struct in6_addr *)__entry->primary_key6; *pin6 = *(struct in6_addr *)pkey; } diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index beec534cbaab..447a8c21cf57 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -16,34 +16,57 @@ * Define enums for tracing information. 
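The mptcp hunk above wraps the lockless read of sk_pacing_rate in READ_ONCE(), since the field can be updated concurrently by another CPU. READ_ONCE() is, approximately, a volatile access that stops the compiler from tearing, caching, or re-issuing the load. A userspace approximation of the macro for illustration (uses the GCC/Clang __typeof__ extension):

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* One stable load, even if another thread updates *rate meanwhile. */
static unsigned long snapshot_rate(const unsigned long *rate)
{
	return READ_ONCE(*rate);
}

int main(void)
{
	unsigned long rate = 12500;

	return snapshot_rate(&rate) == 12500 ? 0 : 1;
}

Without the annotation the compiler is free to read the field twice (or fold the read away entirely), so a tracepoint could log a value that no single moment in time ever held.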
*/ #define netfs_read_traces \ + EM(netfs_read_trace_dio_read, "DIO-READ ") \ EM(netfs_read_trace_expanded, "EXPANDED ") \ EM(netfs_read_trace_readahead, "READAHEAD") \ EM(netfs_read_trace_readpage, "READPAGE ") \ + EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \ E_(netfs_read_trace_write_begin, "WRITEBEGN") +#define netfs_write_traces \ + EM(netfs_write_trace_dio_write, "DIO-WRITE") \ + EM(netfs_write_trace_launder, "LAUNDER ") \ + EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \ + EM(netfs_write_trace_writeback, "WRITEBACK") \ + E_(netfs_write_trace_writethrough, "WRITETHRU") + #define netfs_rreq_origins \ EM(NETFS_READAHEAD, "RA") \ EM(NETFS_READPAGE, "RP") \ - E_(NETFS_READ_FOR_WRITE, "RW") + EM(NETFS_READ_FOR_WRITE, "RW") \ + EM(NETFS_WRITEBACK, "WB") \ + EM(NETFS_WRITETHROUGH, "WT") \ + EM(NETFS_LAUNDER_WRITE, "LW") \ + EM(NETFS_UNBUFFERED_WRITE, "UW") \ + EM(NETFS_DIO_READ, "DR") \ + E_(NETFS_DIO_WRITE, "DW") #define netfs_rreq_traces \ EM(netfs_rreq_trace_assess, "ASSESS ") \ EM(netfs_rreq_trace_copy, "COPY ") \ EM(netfs_rreq_trace_done, "DONE ") \ EM(netfs_rreq_trace_free, "FREE ") \ + EM(netfs_rreq_trace_redirty, "REDIRTY") \ EM(netfs_rreq_trace_resubmit, "RESUBMT") \ EM(netfs_rreq_trace_unlock, "UNLOCK ") \ - E_(netfs_rreq_trace_unmark, "UNMARK ") + EM(netfs_rreq_trace_unmark, "UNMARK ") \ + EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \ + EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \ + E_(netfs_rreq_trace_write_done, "WR-DONE") #define netfs_sreq_sources \ EM(NETFS_FILL_WITH_ZEROES, "ZERO") \ EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \ EM(NETFS_READ_FROM_CACHE, "READ") \ - E_(NETFS_INVALID_READ, "INVL") \ + EM(NETFS_INVALID_READ, "INVL") \ + EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \ + EM(NETFS_WRITE_TO_CACHE, "WRIT") \ + E_(NETFS_INVALID_WRITE, "INVL") #define netfs_sreq_traces \ EM(netfs_sreq_trace_download_instead, "RDOWN") \ EM(netfs_sreq_trace_free, "FREE ") \ + EM(netfs_sreq_trace_limited, "LIMIT") \ EM(netfs_sreq_trace_prepare, "PREP ") \ EM(netfs_sreq_trace_resubmit_short, "SHORT") \ EM(netfs_sreq_trace_submit, "SUBMT") \ @@ -55,19 +78,24 @@ #define netfs_failures \ EM(netfs_fail_check_write_begin, "check-write-begin") \ EM(netfs_fail_copy_to_cache, "copy-to-cache") \ + EM(netfs_fail_dio_read_short, "dio-read-short") \ + EM(netfs_fail_dio_read_zero, "dio-read-zero") \ EM(netfs_fail_read, "read") \ EM(netfs_fail_short_read, "short-read") \ - E_(netfs_fail_prepare_write, "prep-write") + EM(netfs_fail_prepare_write, "prep-write") \ + E_(netfs_fail_write, "write") #define netfs_rreq_ref_traces \ - EM(netfs_rreq_trace_get_hold, "GET HOLD ") \ + EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \ EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \ EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \ EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \ EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \ - EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \ + EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \ + EM(netfs_rreq_trace_put_return, "PUT RETURN ") \ EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \ - EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \ + EM(netfs_rreq_trace_put_work, "PUT WORK ") \ + EM(netfs_rreq_trace_see_work, "SEE WORK ") \ E_(netfs_rreq_trace_new, "NEW ") #define netfs_sreq_ref_traces \ @@ -76,11 +104,44 @@ EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ EM(netfs_sreq_trace_new, "NEW ") \ EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \ + EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \ EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \ 
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \ EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \ + EM(netfs_sreq_trace_put_wip, "PUT WIP ") \ + EM(netfs_sreq_trace_put_work, "PUT WORK ") \ E_(netfs_sreq_trace_put_terminated, "PUT TERM ") +#define netfs_folio_traces \ + /* The first few correspond to enum netfs_how_to_modify */ \ + EM(netfs_folio_is_uptodate, "mod-uptodate") \ + EM(netfs_just_prefetch, "mod-prefetch") \ + EM(netfs_whole_folio_modify, "mod-whole-f") \ + EM(netfs_modify_and_clear, "mod-n-clear") \ + EM(netfs_streaming_write, "mod-streamw") \ + EM(netfs_streaming_write_cont, "mod-streamw+") \ + EM(netfs_flush_content, "flush") \ + EM(netfs_streaming_filled_page, "mod-streamw-f") \ + EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \ + /* The rest are for writeback */ \ + EM(netfs_folio_trace_clear, "clear") \ + EM(netfs_folio_trace_clear_s, "clear-s") \ + EM(netfs_folio_trace_clear_g, "clear-g") \ + EM(netfs_folio_trace_copy_to_cache, "copy") \ + EM(netfs_folio_trace_end_copy, "end-copy") \ + EM(netfs_folio_trace_filled_gaps, "filled-gaps") \ + EM(netfs_folio_trace_kill, "kill") \ + EM(netfs_folio_trace_launder, "launder") \ + EM(netfs_folio_trace_mkwrite, "mkwrite") \ + EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \ + EM(netfs_folio_trace_read_gaps, "read-gaps") \ + EM(netfs_folio_trace_redirty, "redirty") \ + EM(netfs_folio_trace_redirtied, "redirtied") \ + EM(netfs_folio_trace_store, "store") \ + EM(netfs_folio_trace_store_plus, "store+") \ + EM(netfs_folio_trace_wthru, "wthru") \ + E_(netfs_folio_trace_wthru_plus, "wthru+") + #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY @@ -90,11 +151,13 @@ #define E_(a, b) a enum netfs_read_trace { netfs_read_traces } __mode(byte); +enum netfs_write_trace { netfs_write_traces } __mode(byte); enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte); enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte); enum netfs_failure { netfs_failures } __mode(byte); enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte); enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); +enum netfs_folio_trace { netfs_folio_traces } __mode(byte); #endif @@ -107,6 +170,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); #define E_(a, b) TRACE_DEFINE_ENUM(a); netfs_read_traces; +netfs_write_traces; netfs_rreq_origins; netfs_rreq_traces; netfs_sreq_sources; @@ -114,6 +178,7 @@ netfs_sreq_traces; netfs_failures; netfs_rreq_ref_traces; netfs_sreq_ref_traces; +netfs_folio_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -314,6 +379,82 @@ TRACE_EVENT(netfs_sreq_ref, __entry->ref) ); +TRACE_EVENT(netfs_folio, + TP_PROTO(struct folio *folio, enum netfs_folio_trace why), + + TP_ARGS(folio, why), + + TP_STRUCT__entry( + __field(ino_t, ino) + __field(pgoff_t, index) + __field(unsigned int, nr) + __field(enum netfs_folio_trace, why) + ), + + TP_fast_assign( + __entry->ino = folio->mapping->host->i_ino; + __entry->why = why; + __entry->index = folio_index(folio); + __entry->nr = folio_nr_pages(folio); + ), + + TP_printk("i=%05lx ix=%05lx-%05lx %s", + __entry->ino, __entry->index, __entry->index + __entry->nr - 1, + __print_symbolic(__entry->why, netfs_folio_traces)) + ); + +TRACE_EVENT(netfs_write_iter, + TP_PROTO(const struct kiocb *iocb, const struct iov_iter *from), + + TP_ARGS(iocb, from), + + TP_STRUCT__entry( + __field(unsigned long long, start ) + __field(size_t, len ) + __field(unsigned int, flags ) + ), + + TP_fast_assign( + 
__entry->start = iocb->ki_pos; + __entry->len = iov_iter_count(from); + __entry->flags = iocb->ki_flags; + ), + + TP_printk("WRITE-ITER s=%llx l=%zx f=%x", + __entry->start, __entry->len, __entry->flags) + ); + +TRACE_EVENT(netfs_write, + TP_PROTO(const struct netfs_io_request *wreq, + enum netfs_write_trace what), + + TP_ARGS(wreq, what), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned int, cookie ) + __field(enum netfs_write_trace, what ) + __field(unsigned long long, start ) + __field(size_t, len ) + ), + + TP_fast_assign( + struct netfs_inode *__ctx = netfs_inode(wreq->inode); + struct fscache_cookie *__cookie = netfs_i_cookie(__ctx); + __entry->wreq = wreq->debug_id; + __entry->cookie = __cookie ? __cookie->debug_id : 0; + __entry->what = what; + __entry->start = wreq->start; + __entry->len = wreq->len; + ), + + TP_printk("R=%08x %s c=%08x by=%llx-%llx", + __entry->wreq, + __print_symbolic(__entry->what, netfs_write_traces), + __entry->cookie, + __entry->start, __entry->start + __entry->len - 1) + ); + #undef EM #undef E_ #endif /* _TRACE_NETFS_H */ diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index f8069ef2ee0f..110c1475c527 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -22,47 +22,37 @@ ** Event classes **/ -DECLARE_EVENT_CLASS(rpcrdma_completion_class, +DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class, TP_PROTO( - const struct ib_wc *wc, const struct rpc_rdma_cid *cid ), - TP_ARGS(wc, cid), + TP_ARGS(cid), TP_STRUCT__entry( __field(u32, cq_id) __field(int, completion_id) - __field(unsigned long, status) - __field(unsigned int, vendor_err) ), TP_fast_assign( __entry->cq_id = cid->ci_queue_id; __entry->completion_id = cid->ci_completion_id; - __entry->status = wc->status; - if (wc->status) - __entry->vendor_err = wc->vendor_err; - else - __entry->vendor_err = 0; ), - TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", - __entry->cq_id, __entry->completion_id, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err + TP_printk("cq.id=%d cid=%d", + __entry->cq_id, __entry->completion_id ) ); -#define DEFINE_COMPLETION_EVENT(name) \ - DEFINE_EVENT(rpcrdma_completion_class, name, \ +#define DEFINE_SIMPLE_CID_EVENT(name) \ + DEFINE_EVENT(rpcrdma_simple_cid_class, name, \ TP_PROTO( \ - const struct ib_wc *wc, \ const struct rpc_rdma_cid *cid \ ), \ - TP_ARGS(wc, cid)) + TP_ARGS(cid) \ + ) -DECLARE_EVENT_CLASS(rpcrdma_send_completion_class, +DECLARE_EVENT_CLASS(rpcrdma_completion_class, TP_PROTO( const struct ib_wc *wc, const struct rpc_rdma_cid *cid @@ -73,20 +63,29 @@ DECLARE_EVENT_CLASS(rpcrdma_send_completion_class, TP_STRUCT__entry( __field(u32, cq_id) __field(int, completion_id) + __field(unsigned long, status) + __field(unsigned int, vendor_err) ), TP_fast_assign( __entry->cq_id = cid->ci_queue_id; __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; + if (wc->status) + __entry->vendor_err = wc->vendor_err; + else + __entry->vendor_err = 0; ), - TP_printk("cq.id=%u cid=%d", - __entry->cq_id, __entry->completion_id + TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err ) ); -#define DEFINE_SEND_COMPLETION_EVENT(name) \ - DEFINE_EVENT(rpcrdma_send_completion_class, name, \ +#define DEFINE_COMPLETION_EVENT(name) \ + DEFINE_EVENT(rpcrdma_completion_class, name, \ TP_PROTO( \ const struct ib_wc *wc, \ const struct rpc_rdma_cid *cid \ @@ 
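The rpcrdma hunk above replaces several one-off receive/send events with DEFINE_SIMPLE_CID_EVENT stubs on a shared class whose whole payload is the (cq.id, cid) pair. That pair is what lets a trace reader match a posted work request to its later completion. A toy illustration of the correlation idea (the struct mirrors the kernel's rpc_rdma_cid field names, but the trace functions here are invented):

#include <stdio.h>

struct rpc_rdma_cid {
	unsigned int ci_queue_id;
	int ci_completion_id;
};

static void trace_post_recv(const struct rpc_rdma_cid *cid)
{
	printf("post_recv: cq.id=%u cid=%d\n",
	       cid->ci_queue_id, cid->ci_completion_id);
}

static void trace_wc_recv(const struct rpc_rdma_cid *cid)
{
	printf("wc_recv:   cq.id=%u cid=%d\n",
	       cid->ci_queue_id, cid->ci_completion_id);
}

int main(void)
{
	struct rpc_rdma_cid cid = {
		.ci_queue_id = 4,
		.ci_completion_id = 101,
	};

	trace_post_recv(&cid);	/* grep the log for "cid=101" ...      */
	trace_wc_recv(&cid);	/* ... to pair posting with completion */
	return 0;
}

The same motive explains the "cq_id=" to "cq.id=" format unification in the svcrdma events below: one spelling means one grep pattern across the whole transport.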
-978,27 +977,7 @@ TRACE_EVENT(xprtrdma_post_send_err, ) ); -TRACE_EVENT(xprtrdma_post_recv, - TP_PROTO( - const struct rpcrdma_rep *rep - ), - - TP_ARGS(rep), - - TP_STRUCT__entry( - __field(u32, cq_id) - __field(int, completion_id) - ), - - TP_fast_assign( - __entry->cq_id = rep->rr_cid.ci_queue_id; - __entry->completion_id = rep->rr_cid.ci_completion_id; - ), - - TP_printk("cq.id=%d cid=%d", - __entry->cq_id, __entry->completion_id - ) -); +DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv); TRACE_EVENT(xprtrdma_post_recvs, TP_PROTO( @@ -1667,7 +1646,7 @@ TRACE_EVENT(svcrdma_encode_wseg, __entry->offset = offset; ), - TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", + TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", __entry->cq_id, __entry->completion_id, __entry->segno, __entry->length, (unsigned long long)__entry->offset, __entry->handle @@ -1703,7 +1682,7 @@ TRACE_EVENT(svcrdma_decode_rseg, __entry->offset = segment->rs_offset; ), - TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x", + TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x", __entry->cq_id, __entry->completion_id, __entry->segno, __entry->position, __entry->length, (unsigned long long)__entry->offset, __entry->handle @@ -1740,7 +1719,7 @@ TRACE_EVENT(svcrdma_decode_wseg, __entry->offset = segment->rs_offset; ), - TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", + TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", __entry->cq_id, __entry->completion_id, __entry->segno, __entry->length, (unsigned long long)__entry->offset, __entry->handle @@ -1783,29 +1762,29 @@ DEFINE_ERROR_EVENT(chunk); DECLARE_EVENT_CLASS(svcrdma_dma_map_class, TP_PROTO( - const struct svcxprt_rdma *rdma, + const struct rpc_rdma_cid *cid, u64 dma_addr, u32 length ), - TP_ARGS(rdma, dma_addr, length), + TP_ARGS(cid, dma_addr, length), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(u64, dma_addr) __field(u32, length) - __string(device, rdma->sc_cm_id->device->name) - __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; __entry->dma_addr = dma_addr; __entry->length = length; - __assign_str(device, rdma->sc_cm_id->device->name); - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s device=%s dma_addr=%llu length=%u", - __get_str(addr), __get_str(device), + TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u", + __entry->cq_id, __entry->completion_id, __entry->dma_addr, __entry->length ) ); @@ -1813,11 +1792,12 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class, #define DEFINE_SVC_DMA_EVENT(name) \ DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \ TP_PROTO( \ - const struct svcxprt_rdma *rdma,\ + const struct rpc_rdma_cid *cid, \ u64 dma_addr, \ u32 length \ ), \ - TP_ARGS(rdma, dma_addr, length)) + TP_ARGS(cid, dma_addr, length) \ + ) DEFINE_SVC_DMA_EVENT(dma_map_page); DEFINE_SVC_DMA_EVENT(dma_map_err); @@ -1826,33 +1806,37 @@ DEFINE_SVC_DMA_EVENT(dma_unmap_page); TRACE_EVENT(svcrdma_dma_map_rw_err, TP_PROTO( const struct svcxprt_rdma *rdma, + u64 offset, + u32 handle, unsigned int nents, int status ), - TP_ARGS(rdma, nents, status), + TP_ARGS(rdma, offset, handle, nents, status), TP_STRUCT__entry( - __field(int, status) + __field(u32, cq_id) + __field(u32, handle) + __field(u64, offset) __field(unsigned int, nents) - __string(device, rdma->sc_cm_id->device->name) - __string(addr, rdma->sc_xprt.xpt_remotebuf) + __field(int, status) ), TP_fast_assign( - 
__entry->status = status; + __entry->cq_id = rdma->sc_sq_cq->res.id; + __entry->handle = handle; + __entry->offset = offset; __entry->nents = nents; - __assign_str(device, rdma->sc_cm_id->device->name); - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + __entry->status = status; ), - TP_printk("addr=%s device=%s nents=%u status=%d", - __get_str(addr), __get_str(device), __entry->nents, - __entry->status + TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d", + __entry->cq_id, (unsigned long long)__entry->offset, + __entry->handle, __entry->nents, __entry->status ) ); -TRACE_EVENT(svcrdma_no_rwctx_err, +TRACE_EVENT(svcrdma_rwctx_empty, TP_PROTO( const struct svcxprt_rdma *rdma, unsigned int num_sges @@ -1861,79 +1845,75 @@ TRACE_EVENT(svcrdma_no_rwctx_err, TP_ARGS(rdma, num_sges), TP_STRUCT__entry( + __field(u32, cq_id) __field(unsigned int, num_sges) - __string(device, rdma->sc_cm_id->device->name) - __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( + __entry->cq_id = rdma->sc_sq_cq->res.id; __entry->num_sges = num_sges; - __assign_str(device, rdma->sc_cm_id->device->name); - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s device=%s num_sges=%d", - __get_str(addr), __get_str(device), __entry->num_sges + TP_printk("cq.id=%u num_sges=%d", + __entry->cq_id, __entry->num_sges ) ); TRACE_EVENT(svcrdma_page_overrun_err, TP_PROTO( - const struct svcxprt_rdma *rdma, - const struct svc_rqst *rqst, + const struct rpc_rdma_cid *cid, unsigned int pageno ), - TP_ARGS(rdma, rqst, pageno), + TP_ARGS(cid, pageno), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, pageno) - __field(u32, xid) - __string(device, rdma->sc_cm_id->device->name) - __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; __entry->pageno = pageno; - __entry->xid = __be32_to_cpu(rqst->rq_xid); - __assign_str(device, rdma->sc_cm_id->device->name); - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), - __get_str(device), __entry->xid, __entry->pageno + TP_printk("cq.id=%u cid=%d pageno=%u", + __entry->cq_id, __entry->completion_id, + __entry->pageno ) ); TRACE_EVENT(svcrdma_small_wrch_err, TP_PROTO( - const struct svcxprt_rdma *rdma, + const struct rpc_rdma_cid *cid, unsigned int remaining, unsigned int seg_no, unsigned int num_segs ), - TP_ARGS(rdma, remaining, seg_no, num_segs), + TP_ARGS(cid, remaining, seg_no, num_segs), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, remaining) __field(unsigned int, seg_no) __field(unsigned int, num_segs) - __string(device, rdma->sc_cm_id->device->name) - __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; __entry->remaining = remaining; __entry->seg_no = seg_no; __entry->num_segs = num_segs; - __assign_str(device, rdma->sc_cm_id->device->name); - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u", - __get_str(addr), __get_str(device), __entry->remaining, - __entry->seg_no, __entry->num_segs + TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u", + __entry->cq_id, __entry->completion_id, + __entry->remaining, __entry->seg_no, __entry->num_segs ) ); @@ -1959,7 +1939,7 @@ TRACE_EVENT(svcrdma_send_pullup, __entry->msglen = msglen; ), - 
TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)", + TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)", __entry->cq_id, __entry->completion_id, __entry->hdrlen, __entry->msglen, __entry->hdrlen + __entry->msglen) @@ -2014,37 +1994,17 @@ TRACE_EVENT(svcrdma_post_send, wr->ex.invalidate_rkey : 0; ), - TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", + TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x", __entry->cq_id, __entry->completion_id, __entry->num_sge, __entry->inv_rkey ) ); -DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send); +DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send); DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush); DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err); -TRACE_EVENT(svcrdma_post_recv, - TP_PROTO( - const struct svc_rdma_recv_ctxt *ctxt - ), - - TP_ARGS(ctxt), - - TP_STRUCT__entry( - __field(u32, cq_id) - __field(int, completion_id) - ), - - TP_fast_assign( - __entry->cq_id = ctxt->rc_cid.ci_queue_id; - __entry->completion_id = ctxt->rc_cid.ci_completion_id; - ), - - TP_printk("cq.id=%d cid=%d", - __entry->cq_id, __entry->completion_id - ) -); +DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv); DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv); DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush); @@ -2152,8 +2112,9 @@ TRACE_EVENT(svcrdma_wc_read, DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush); DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err); +DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished); -DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write); +DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write); DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush); DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err); @@ -2184,65 +2145,74 @@ TRACE_EVENT(svcrdma_qp_error, ) ); -DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, +DECLARE_EVENT_CLASS(svcrdma_sendqueue_class, TP_PROTO( - const struct svcxprt_rdma *rdma + const struct svcxprt_rdma *rdma, + const struct rpc_rdma_cid *cid ), - TP_ARGS(rdma), + TP_ARGS(rdma, cid), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(int, avail) __field(int, depth) - __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; __entry->avail = atomic_read(&rdma->sc_sq_avail); __entry->depth = rdma->sc_sq_depth; - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s sc_sq_avail=%d/%d", - __get_str(addr), __entry->avail, __entry->depth + TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d", + __entry->cq_id, __entry->completion_id, + __entry->avail, __entry->depth ) ); #define DEFINE_SQ_EVENT(name) \ - DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\ - TP_PROTO( \ - const struct svcxprt_rdma *rdma \ - ), \ - TP_ARGS(rdma)) + DEFINE_EVENT(svcrdma_sendqueue_class, name, \ + TP_PROTO( \ + const struct svcxprt_rdma *rdma, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(rdma, cid) \ + ) -DEFINE_SQ_EVENT(full); -DEFINE_SQ_EVENT(retry); +DEFINE_SQ_EVENT(svcrdma_sq_full); +DEFINE_SQ_EVENT(svcrdma_sq_retry); TRACE_EVENT(svcrdma_sq_post_err, TP_PROTO( const struct svcxprt_rdma *rdma, + const struct rpc_rdma_cid *cid, int status ), - TP_ARGS(rdma, status), + TP_ARGS(rdma, cid, status), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(int, avail) __field(int, depth) __field(int, status) - __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; __entry->avail = atomic_read(&rdma->sc_sq_avail); __entry->depth = rdma->sc_sq_depth; __entry->status 
= status; - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s sc_sq_avail=%d/%d status=%d", - __get_str(addr), __entry->avail, __entry->depth, - __entry->status + TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d", + __entry->cq_id, __entry->completion_id, + __entry->avail, __entry->depth, __entry->status ) ); diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 4c53a5ef6257..87b8de9b6c1c 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -128,6 +128,7 @@ EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \ EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \ EM(rxrpc_skb_get_conn_work, "GET conn-work") \ + EM(rxrpc_skb_get_last_nack, "GET last-nack") \ EM(rxrpc_skb_get_local_work, "GET locl-work") \ EM(rxrpc_skb_get_reject_work, "GET rej-work ") \ EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \ @@ -141,6 +142,7 @@ EM(rxrpc_skb_put_error_report, "PUT error-rep") \ EM(rxrpc_skb_put_input, "PUT input ") \ EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \ + EM(rxrpc_skb_put_last_nack, "PUT last-nack") \ EM(rxrpc_skb_put_purge, "PUT purge ") \ EM(rxrpc_skb_put_rotate, "PUT rotate ") \ EM(rxrpc_skb_put_unknown, "PUT unknown ") \ @@ -178,7 +180,9 @@ #define rxrpc_peer_traces \ EM(rxrpc_peer_free, "FREE ") \ EM(rxrpc_peer_get_accept, "GET accept ") \ + EM(rxrpc_peer_get_application, "GET app ") \ EM(rxrpc_peer_get_bundle, "GET bundle ") \ + EM(rxrpc_peer_get_call, "GET call ") \ EM(rxrpc_peer_get_client_conn, "GET cln-conn") \ EM(rxrpc_peer_get_input, "GET input ") \ EM(rxrpc_peer_get_input_error, "GET inpt-err") \ @@ -187,6 +191,7 @@ EM(rxrpc_peer_get_service_conn, "GET srv-conn") \ EM(rxrpc_peer_new_client, "NEW client ") \ EM(rxrpc_peer_new_prealloc, "NEW prealloc") \ + EM(rxrpc_peer_put_application, "PUT app ") \ EM(rxrpc_peer_put_bundle, "PUT bundle ") \ EM(rxrpc_peer_put_call, "PUT call ") \ EM(rxrpc_peer_put_conn, "PUT conn ") \ @@ -328,7 +333,7 @@ E_(rxrpc_rtt_tx_ping, "PING") #define rxrpc_rtt_rx_traces \ - EM(rxrpc_rtt_rx_cancel, "CNCL") \ + EM(rxrpc_rtt_rx_other_ack, "OACK") \ EM(rxrpc_rtt_rx_obsolete, "OBSL") \ EM(rxrpc_rtt_rx_lost, "LOST") \ EM(rxrpc_rtt_rx_ping_response, "PONG") \ @@ -1549,7 +1554,7 @@ TRACE_EVENT(rxrpc_congest, memcpy(&__entry->sum, summary, sizeof(__entry->sum)); ), - TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s", + TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s", __entry->call, __entry->ack_serial, __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), @@ -1557,9 +1562,9 @@ TRACE_EVENT(rxrpc_congest, __print_symbolic(__entry->sum.mode, rxrpc_congest_modes), __entry->sum.cwnd, __entry->sum.ssthresh, - __entry->sum.nr_acks, __entry->sum.saw_nacks, + __entry->sum.nr_acks, __entry->sum.nr_retained_nacks, __entry->sum.nr_new_acks, - __entry->sum.nr_rot_new_acks, + __entry->sum.nr_new_nacks, __entry->top - __entry->hard_ack, __entry->sum.cumulative_acks, __entry->sum.dup_acks, diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index fbb99a61f714..dbb01b4b7451 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -493,33 +493,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked, */ DECLARE_EVENT_CLASS(sched_stat_runtime, - TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime), + TP_PROTO(struct task_struct *tsk, u64 runtime), - TP_ARGS(tsk, __perf_count(runtime), vruntime), + TP_ARGS(tsk, __perf_count(runtime)), 
TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( u64, runtime ) - __field( u64, vruntime ) ), TP_fast_assign( memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid; __entry->runtime = runtime; - __entry->vruntime = vruntime; ), - TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]", + TP_printk("comm=%s pid=%d runtime=%Lu [ns]", __entry->comm, __entry->pid, - (unsigned long long)__entry->runtime, - (unsigned long long)__entry->vruntime) + (unsigned long long)__entry->runtime) ); DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime, - TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime), - TP_ARGS(tsk, runtime, vruntime)); + TP_PROTO(struct task_struct *tsk, u64 runtime), + TP_ARGS(tsk, runtime)); /* * Tracepoint for showing priority inheritance modifying a tasks @@ -664,6 +661,58 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa, TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu) ); +#ifdef CONFIG_NUMA_BALANCING +#define NUMAB_SKIP_REASON \ + EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \ + EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \ + EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \ + EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \ + EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \ + EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \ + EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" ) + +/* Redefine for export. */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +NUMAB_SKIP_REASON + +/* Redefine for symbolic printing. */ +#undef EM +#undef EMe +#define EM(a, b) { a, b }, +#define EMe(a, b) { a, b } + +TRACE_EVENT(sched_skip_vma_numa, + + TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma, + enum numa_vmaskip_reason reason), + + TP_ARGS(mm, vma, reason), + + TP_STRUCT__entry( + __field(unsigned long, numa_scan_offset) + __field(unsigned long, vm_start) + __field(unsigned long, vm_end) + __field(enum numa_vmaskip_reason, reason) + ), + + TP_fast_assign( + __entry->numa_scan_offset = mm->numa_scan_offset; + __entry->vm_start = vma->vm_start; + __entry->vm_end = vma->vm_end; + __entry->reason = reason; + ), + + TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s", + __entry->numa_scan_offset, + __entry->vm_start, + __entry->vm_end, + __print_symbolic(__entry->reason, NUMAB_SKIP_REASON)) +); +#endif /* CONFIG_NUMA_BALANCING */ /* * Tracepoint for waking a polling cpu without an IPI. 
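The NUMAB_SKIP_REASON hunk above uses the tracepoint EM()/EMe() X-macro idiom: a single list is expanded twice, first so TRACE_DEFINE_ENUM() exports each enum value to userspace tooling, then to build the value/name pairs that __print_symbolic() consumes in TP_printk(). A minimal userspace sketch of the same idiom, with illustrative names rather than kernel API:

#include <stdio.h>

#define SKIP_REASONS \
	EM(SKIP_UNSUITABLE, "unsuitable") \
	EMe(SKIP_SHARED_RO, "shared_ro")

/* First expansion: emit the enumerators themselves. */
#define EM(a, b) a,
#define EMe(a, b) a
enum skip_reason { SKIP_REASONS };
#undef EM
#undef EMe

/* Second expansion: emit a value -> name table for symbolic printing. */
#define EM(a, b) [a] = b,
#define EMe(a, b) [a] = b
static const char *const skip_names[] = { SKIP_REASONS };
#undef EM
#undef EMe

int main(void)
{
	printf("%s\n", skip_names[SKIP_SHARED_RO]); /* prints "shared_ro" */
	return 0;
}

The separate EMe() macro marks the last entry so that no trailing comma is emitted; in the kernel the pairs are pasted into the __print_symbolic() macro argument list, where a trailing comma would create an empty argument.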
@@ -735,6 +784,11 @@ DECLARE_TRACE(sched_update_nr_running_tp, TP_PROTO(struct rq *rq, int change), TP_ARGS(rq, change)); +DECLARE_TRACE(sched_compute_energy_tp, + TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy, + unsigned long max_util, unsigned long busy_time), + TP_ARGS(p, dst_cpu, energy, max_util, busy_time)); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 6beb38c1dcb5..cdd3a45e6003 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1675,9 +1675,7 @@ DEFINE_SVCXDRBUF_EVENT(sendto); svc_rqst_flag(LOCAL) \ svc_rqst_flag(USEDEFERRAL) \ svc_rqst_flag(DROPME) \ - svc_rqst_flag(SPLICE_OK) \ svc_rqst_flag(VICTIM) \ - svc_rqst_flag(BUSY) \ svc_rqst_flag_end(DATA) #undef svc_rqst_flag diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index b4bc2828fa09..1ef58a04fc57 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -46,22 +46,21 @@ DEFINE_EVENT(timer_class, timer_init, /** * timer_start - called when the timer is started - * @timer: pointer to struct timer_list - * @expires: the timers expiry time - * @flags: the timers flags + * @timer: pointer to struct timer_list + * @bucket_expiry: the bucket expiry time */ TRACE_EVENT(timer_start, TP_PROTO(struct timer_list *timer, - unsigned long expires, - unsigned int flags), + unsigned long bucket_expiry), - TP_ARGS(timer, expires, flags), + TP_ARGS(timer, bucket_expiry), TP_STRUCT__entry( __field( void *, timer ) __field( void *, function ) __field( unsigned long, expires ) + __field( unsigned long, bucket_expiry ) __field( unsigned long, now ) __field( unsigned int, flags ) ), @@ -69,15 +68,16 @@ TRACE_EVENT(timer_start, TP_fast_assign( __entry->timer = timer; __entry->function = timer->function; - __entry->expires = expires; + __entry->expires = timer->expires; + __entry->bucket_expiry = bucket_expiry; __entry->now = jiffies; - __entry->flags = flags; + __entry->flags = timer->flags; ), - TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", + TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s", __entry->timer, __entry->function, __entry->expires, (long)__entry->expires - __entry->now, - __entry->flags & TIMER_CPUMASK, + __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK, __entry->flags >> TIMER_ARRAYSHIFT, decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK)) ); @@ -142,6 +142,26 @@ DEFINE_EVENT(timer_class, timer_cancel, TP_ARGS(timer) ); +TRACE_EVENT(timer_base_idle, + + TP_PROTO(bool is_idle, unsigned int cpu), + + TP_ARGS(is_idle, cpu), + + TP_STRUCT__entry( + __field( bool, is_idle ) + __field( unsigned int, cpu ) + ), + + TP_fast_assign( + __entry->is_idle = is_idle; + __entry->cpu = cpu; + ), + + TP_printk("is_idle=%d cpu=%d", + __entry->is_idle, __entry->cpu) +); + #define decode_clockid(type) \ __print_symbolic(type, \ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \ diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index 992517ac3292..b930669bd1f0 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -267,15 +267,15 @@ DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume, TP_ARGS(dev_name, err, usecs, dev_state, link_state)); TRACE_EVENT(ufshcd_command, - TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, + TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t, unsigned int tag, u32 doorbell, 
u32 hwq_id, int transfer_len, u32 intr, u64 lba, u8 opcode, u8 group_id), - TP_ARGS(dev_name, str_t, tag, doorbell, hwq_id, transfer_len, - intr, lba, opcode, group_id), + TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba, + opcode, group_id), TP_STRUCT__entry( - __string(dev_name, dev_name) + __field(struct scsi_device *, sdev) __field(enum ufs_trace_str_t, str_t) __field(unsigned int, tag) __field(u32, doorbell) @@ -288,7 +288,7 @@ TRACE_EVENT(ufshcd_command, ), TP_fast_assign( - __assign_str(dev_name, dev_name); + __entry->sdev = sdev; __entry->str_t = str_t; __entry->tag = tag; __entry->doorbell = doorbell; @@ -302,8 +302,9 @@ TRACE_EVENT(ufshcd_command, TP_printk( "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d", - show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), - __entry->tag, __entry->doorbell, __entry->transfer_len, __entry->intr, + show_ufs_cmd_trace_str(__entry->str_t), + dev_name(&__entry->sdev->sdev_dev), __entry->tag, + __entry->doorbell, __entry->transfer_len, __entry->intr, __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode), (u32)__entry->group_id, __entry->hwq_id ) diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index d2123dd960d5..1a488c30afa5 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -285,10 +285,9 @@ TRACE_EVENT(mm_vmscan_lru_isolate, unsigned long nr_scanned, unsigned long nr_skipped, unsigned long nr_taken, - isolate_mode_t isolate_mode, int lru), - TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), + TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, lru), TP_STRUCT__entry( __field(int, highest_zoneidx) @@ -297,7 +296,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate, __field(unsigned long, nr_scanned) __field(unsigned long, nr_skipped) __field(unsigned long, nr_taken) - __field(unsigned int, isolate_mode) __field(int, lru) ), @@ -308,7 +306,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate, __entry->nr_scanned = nr_scanned; __entry->nr_skipped = nr_skipped; __entry->nr_taken = nr_taken; - __entry->isolate_mode = (__force unsigned int)isolate_mode; __entry->lru = lru; ), @@ -316,8 +313,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate, * classzone is previous name of the highest_zoneidx. * Reason not to change it is the ABI requirement of the tracepoint. 
*/ - TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s", - __entry->isolate_mode, + TP_printk("classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s", __entry->highest_zoneidx, __entry->order, __entry->nr_requested, diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h index d0b3f0ea9ba1..f1ebe36787c3 100644 --- a/include/trace/events/vsock_virtio_transport_common.h +++ b/include/trace/events/vsock_virtio_transport_common.h @@ -43,7 +43,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt, __u32 len, __u16 type, __u16 op, - __u32 flags + __u32 flags, + bool zcopy ), TP_ARGS( src_cid, src_port, @@ -51,7 +52,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt, len, type, op, - flags + flags, + zcopy ), TP_STRUCT__entry( __field(__u32, src_cid) @@ -62,6 +64,7 @@ TRACE_EVENT(virtio_transport_alloc_pkt, __field(__u16, type) __field(__u16, op) __field(__u32, flags) + __field(bool, zcopy) ), TP_fast_assign( __entry->src_cid = src_cid; @@ -72,14 +75,15 @@ TRACE_EVENT(virtio_transport_alloc_pkt, __entry->type = type; __entry->op = op; __entry->flags = flags; + __entry->zcopy = zcopy; ), - TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x", + TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s", __entry->src_cid, __entry->src_port, __entry->dst_cid, __entry->dst_port, __entry->len, show_type(__entry->type), show_op(__entry->op), - __entry->flags) + __entry->flags, __entry->zcopy ? "true" : "false") ); TRACE_EVENT(virtio_transport_recv_pkt, diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h index 44a3f565264d..0577f0cdd231 100644 --- a/include/trace/events/xen.h +++ b/include/trace/events/xen.h @@ -6,26 +6,26 @@ #define _TRACE_XEN_H #include <linux/tracepoint.h> -#include <asm/paravirt_types.h> +#include <asm/xen/hypervisor.h> #include <asm/xen/trace_types.h> struct multicall_entry; /* Multicalls */ DECLARE_EVENT_CLASS(xen_mc__batch, - TP_PROTO(enum paravirt_lazy_mode mode), + TP_PROTO(enum xen_lazy_mode mode), TP_ARGS(mode), TP_STRUCT__entry( - __field(enum paravirt_lazy_mode, mode) + __field(enum xen_lazy_mode, mode) ), TP_fast_assign(__entry->mode = mode), TP_printk("start batch LAZY_%s", - (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" : - (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE") + (__entry->mode == XEN_LAZY_MMU) ? "MMU" : + (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE") ); #define DEFINE_XEN_MC_BATCH(name) \ DEFINE_EVENT(xen_mc__batch, name, \ - TP_PROTO(enum paravirt_lazy_mode mode), \ + TP_PROTO(enum xen_lazy_mode mode), \ TP_ARGS(mode)) DEFINE_XEN_MC_BATCH(xen_mc_batch); diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 0f52d0ac47c5..b7bc545ec3b2 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -68,11 +68,6 @@ union __sifields { /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ struct { void __user *_addr; /* faulting insn/memory ref. */ -#ifdef __ia64__ - int _imm; /* immediate value for "break" */ - unsigned int _flags; /* see ia64 si_flags */ - unsigned long _isr; /* isr */ -#endif #define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? 
\ sizeof(short) : __alignof__(void *)) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index abe087c53b4b..75f00965ab15 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -71,7 +71,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr) #define __NR_getcwd 17 __SYSCALL(__NR_getcwd, sys_getcwd) #define __NR_lookup_dcookie 18 -__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie) +__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall) #define __NR_eventfd2 19 __SYSCALL(__NR_eventfd2, sys_eventfd2) #define __NR_epoll_create1 20 @@ -816,15 +816,34 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease) __SYSCALL(__NR_futex_waitv, sys_futex_waitv) #define __NR_set_mempolicy_home_node 450 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) - #define __NR_cachestat 451 __SYSCALL(__NR_cachestat, sys_cachestat) - #define __NR_fchmodat2 452 __SYSCALL(__NR_fchmodat2, sys_fchmodat2) +#define __NR_map_shadow_stack 453 +__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack) +#define __NR_futex_wake 454 +__SYSCALL(__NR_futex_wake, sys_futex_wake) +#define __NR_futex_wait 455 +__SYSCALL(__NR_futex_wait, sys_futex_wait) +#define __NR_futex_requeue 456 +__SYSCALL(__NR_futex_requeue, sys_futex_requeue) + +#define __NR_statmount 457 +__SYSCALL(__NR_statmount, sys_statmount) + +#define __NR_listmount 458 +__SYSCALL(__NR_listmount, sys_listmount) + +#define __NR_lsm_get_self_attr 459 +__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr) +#define __NR_lsm_set_self_attr 460 +__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr) +#define __NR_lsm_list_modules 461 +__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) #undef __NR_syscalls -#define __NR_syscalls 453 +#define __NR_syscalls 462 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index f477eda6a2b8..ad21c613fec8 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -150,7 +150,7 @@ extern "C" { */ #define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12) /* Flag that BO is shared coherently between multiple devices or CPU threads. - * May depend on GPU instructions to flush caches explicitly + * May depend on GPU instructions to flush caches to system scope explicitly. * * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and * may override the MTYPE selected in AMDGPU_VA_OP_MAP. @@ -163,6 +163,14 @@ extern "C" { * may override the MTYPE selected in AMDGPU_VA_OP_MAP. */ #define AMDGPU_GEM_CREATE_UNCACHED (1 << 14) +/* Flag that BO should be coherent across devices when using device-level + * atomics. May depend on GPU instructions to flush caches to device scope + * explicitly, promoting them to system scope automatically. + * + * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and + * may override the MTYPE selected in AMDGPU_VA_OP_MAP. 
+ */ +#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15) struct drm_amdgpu_gem_create_in { /** the requested memory size */ @@ -241,9 +249,9 @@ union drm_amdgpu_bo_list { /* unknown cause */ #define AMDGPU_CTX_UNKNOWN_RESET 3 -/* indicate gpu reset occured after ctx created */ +/* indicate gpu reset occurred after ctx created */ #define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0) -/* indicate vram lost occured after ctx created */ +/* indicate vram lost occurred after ctx created */ #define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1) /* indicate some job from this context once cause gpu hang */ #define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2) @@ -586,7 +594,8 @@ struct drm_amdgpu_gem_va { */ #define AMDGPU_HW_IP_VCN_ENC 7 #define AMDGPU_HW_IP_VCN_JPEG 8 -#define AMDGPU_HW_IP_NUM 9 +#define AMDGPU_HW_IP_VPE 9 +#define AMDGPU_HW_IP_NUM 10 #define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1 @@ -797,6 +806,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow { #define AMDGPU_INFO_FW_MES 0x1a /* Subquery id: Query IMU firmware version */ #define AMDGPU_INFO_FW_IMU 0x1b + /* Subquery id: Query VPE firmware version */ + #define AMDGPU_INFO_FW_VPE 0x1c /* number of bytes moved for TTM migration */ #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f @@ -895,6 +906,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow { #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1 /* Query the max number of IBs per gang per submission */ #define AMDGPU_INFO_MAX_IBS 0x22 +/* query last page fault info */ +#define AMDGPU_INFO_GPUVM_FAULT 0x23 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff @@ -1220,6 +1233,20 @@ struct drm_amdgpu_info_video_caps { struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT]; }; +#define AMDGPU_VMHUB_TYPE_MASK 0xff +#define AMDGPU_VMHUB_TYPE_SHIFT 0 +#define AMDGPU_VMHUB_TYPE_GFX 0 +#define AMDGPU_VMHUB_TYPE_MM0 1 +#define AMDGPU_VMHUB_TYPE_MM1 2 +#define AMDGPU_VMHUB_IDX_MASK 0xff00 +#define AMDGPU_VMHUB_IDX_SHIFT 8 + +struct drm_amdgpu_info_gpuvm_fault { + __u64 addr; + __u32 status; + __u32 vmhub; +}; + /* * Supported GPU families */ @@ -1238,6 +1265,7 @@ struct drm_amdgpu_info_video_caps { #define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */ #define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */ #define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */ +#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */ #if defined(__cplusplus) } diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 794c1d857677..16122819edfe 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -713,7 +713,8 @@ struct drm_gem_open { /** * DRM_CAP_ASYNC_PAGE_FLIP * - * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC. + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy + * page-flips. */ #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 /** @@ -773,6 +774,13 @@ struct drm_gem_open { * :ref:`drm_sync_objects`. */ #define DRM_CAP_SYNCOBJ_TIMELINE 0x14 +/** + * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP + * + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic + * commits. + */ +#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15 /* DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { @@ -842,6 +850,31 @@ struct drm_get_cap { */ #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 +/** + * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT + * + * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and + * virtualbox) have additional restrictions for cursor planes (thus + * making cursor planes on those drivers not truly universal,) e.g. 
+ they need cursor planes to act like one would expect from a mouse + * cursor and have correctly set hotspot properties. + * If this client cap is not set, the DRM core will hide the cursor plane on + * those virtualized drivers because not setting it implies that the + * client is not capable of dealing with those extra restrictions. + * Clients which do set the cursor hotspot and treat the cursor plane + * like a mouse cursor should set this property. + * The client must enable &DRM_CLIENT_CAP_ATOMIC first. + * + * Setting this property on drivers which do not special case + * cursor planes (i.e. non-virtualized drivers) will return + * EOPNOTSUPP, which can be used by userspace to gauge + * requirements of the hardware/drivers they're running on. + * + * This capability is always supported for atomic-capable virtualized + * drivers starting from kernel version 6.6. + */ +#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6 + /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ struct drm_set_client_cap { __u64 capability; @@ -893,6 +926,7 @@ struct drm_syncobj_transfer { #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */ +#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */ struct drm_syncobj_wait { __u64 handles; /* absolute timeout */ @@ -901,6 +935,14 @@ struct drm_syncobj_wait { __u32 flags; __u32 first_signaled; /* only valid when not waiting all */ __u32 pad; + /** + * @deadline_nsec - fence deadline hint + * + * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing + * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is + * set. + */ + __u64 deadline_nsec; }; struct drm_syncobj_timeline_wait { @@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait { __u32 flags; __u32 first_signaled; /* only valid when not waiting all */ __u32 pad; + /** + * @deadline_nsec - fence deadline hint + * + * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing + * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is + * set. + */ + __u64 deadline_nsec; }; /** @@ -1134,6 +1184,26 @@ extern "C" { #define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) #define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) +/** + * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object. + * + * KMS dumb buffers provide a very primitive way to allocate a buffer object + * suitable for scanout and map it for software rendering. KMS dumb buffers are + * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb + * buffers are not suitable to be displayed on any other device than the KMS + * device where they were allocated from. Also see + * :ref:`kms_dumb_buffer_objects`. + * + * The IOCTL argument is a struct drm_mode_create_dumb. + * + * User-space is expected to create a KMS dumb buffer via this IOCTL, then add + * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via + * &DRM_IOCTL_MODE_MAP_DUMB. + * + * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported. + * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate + * driver preferences for dumb buffers.
+ */ #define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb) #define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb) #define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb) @@ -1198,6 +1268,26 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd) +/** + * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer. + * + * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL + * argument is a framebuffer object ID. + * + * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable + * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept + * alive. When the plane no longer uses the framebuffer (because the + * framebuffer is replaced with another one, or the plane is disabled), the + * framebuffer is cleaned up. + * + * This is useful to implement flicker-free transitions between two processes. + * + * Depending on the threat model, user-space may want to ensure that the + * framebuffer doesn't expose any sensitive user information: closed + * framebuffers attached to a plane can be read back by the next DRM master. + */ +#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb) + /* * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f. diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 8db7fd3f743e..84d502e42961 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -54,7 +54,7 @@ extern "C" { * Format modifiers may change any property of the buffer, including the number * of planes and/or the required allocation size. Format modifiers are * vendor-namespaced, and as such the relationship between a fourcc code and a - * modifier is specific to the modifer being used. For example, some modifiers + * modifier is specific to the modifier being used. For example, some modifiers * may preserve meaning - such as number of planes - from the fourcc code, * whereas others may not. * @@ -79,7 +79,7 @@ extern "C" { * format. * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users * see modifiers as opaque tokens they can check for equality and intersect. - * These users musn't need to know to reason about the modifier value + * These users mustn't need to know to reason about the modifier value * (i.e. they are not expected to extract information out of the modifier). * * Vendors should document their modifier usage in as much detail as @@ -323,6 +323,8 @@ extern "C" { * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian */ #define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */ /* * 2 plane YCbCr MSB aligned @@ -538,7 +540,7 @@ extern "C" { * This is a tiled layout using 4Kb tiles in row-major layout. * Within the tile pixels are laid out in 16 256 byte units / sub-tiles which * are arranged in four groups (two wide, two high) with column-major layout. - * Each group therefore consits out of four 256 byte units, which are also laid + * Each group therefore consists out of four 256 byte units, which are also laid * out as 2x2 column-major. * 256 byte units are made out of four 64 byte blocks of pixels, producing * either a square block or a 2:1 unit. 
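The DRM_IOCTL_MODE_CREATE_DUMB documentation above prescribes a three-step flow: create the dumb buffer, register it as a KMS framebuffer, then map it for software rendering. A hedged userspace sketch of that sequence, with error handling elided and fd assumed to be an already-open, KMS-capable DRM device node:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Returns a CPU mapping of a new dumb framebuffer; *fb_id receives the
 * KMS framebuffer ID for use with modesetting or page-flip ioctls. */
static void *map_dumb_fb(int fd, uint32_t width, uint32_t height,
			 uint32_t *fb_id)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32, /* flags stays 0 */
	};
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	struct drm_mode_fb_cmd fb = {
		.width = width, .height = height, .pitch = create.pitch,
		.bpp = 32, .depth = 24, .handle = create.handle,
	};
	ioctl(fd, DRM_IOCTL_MODE_ADDFB, &fb);
	*fb_id = fb.fb_id;

	struct drm_mode_map_dumb map = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}

The offset returned by DRM_IOCTL_MODE_MAP_DUMB is a fake offset that is only meaningful to mmap() on the same DRM file descriptor, not a physical or VRAM address.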
@@ -1101,7 +1103,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) */ /* - * The top 4 bits (out of the 56 bits alloted for specifying vendor specific + * The top 4 bits (out of the 56 bits allotted for specifying vendor specific * modifiers) denote the category for modifiers. Currently we have three * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of * sixteen different categories. @@ -1417,7 +1419,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) * Amlogic FBC Memory Saving mode * * Indicates the storage is packed when pixel size is multiple of word - * boudaries, i.e. 8bit should be stored in this mode to save allocation + * boundaries, i.e. 8bit should be stored in this mode to save allocation * memory. * * This mode reduces body layout to 3072 bytes per 64x32 superblock with diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index ea1b639bcb28..7040e7ea80c7 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -36,10 +36,10 @@ extern "C" { /** * DOC: overview * - * DRM exposes many UAPI and structure definition to have a consistent - * and standardized interface with user. + * DRM exposes many UAPI and structure definitions to have a consistent + * and standardized interface with users. * Userspace can refer to these structure definitions and UAPI formats - * to communicate to driver + * to communicate to drivers. */ #define DRM_CONNECTOR_NAME_LEN 32 @@ -540,7 +540,7 @@ struct drm_mode_get_connector { /* the PROP_ATOMIC flag is used to hide properties from userspace that * is not aware of atomic properties. This is mostly to work around * older userspace (DDX drivers) that read/write each prop they find, - * witout being aware that this could be triggering a lengthy modeset. + * without being aware that this could be triggering a lengthy modeset. */ #define DRM_MODE_PROP_ATOMIC 0x80000000 @@ -664,7 +664,7 @@ struct drm_mode_fb_cmd { }; #define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ -#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */ +#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */ /** * struct drm_mode_fb_cmd2 - Frame-buffer metadata. @@ -846,6 +846,14 @@ struct drm_color_ctm { __u64 matrix[9]; }; +struct drm_color_ctm_3x4 { + /* + * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude + * (not two's complement!) format. + */ + __u64 matrix[12]; +}; + struct drm_color_lut { /* * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and @@ -881,8 +889,8 @@ struct hdr_metadata_infoframe { * These are coded as unsigned 16-bit values in units of * 0.00002, where 0x0000 represents zero and 0xC350 * represents 1.0000. - * @display_primaries.x: X cordinate of color primary. - * @display_primaries.y: Y cordinate of color primary. + * @display_primaries.x: X coordinate of color primary. + * @display_primaries.y: Y coordinate of color primary. */ struct { __u16 x, y; @@ -892,8 +900,8 @@ struct hdr_metadata_infoframe { * These are coded as unsigned 16-bit values in units of * 0.00002, where 0x0000 represents zero and 0xC350 * represents 1.0000. - * @white_point.x: X cordinate of whitepoint of color primary. - * @white_point.y: Y cordinate of whitepoint of color primary. + * @white_point.x: X coordinate of whitepoint of color primary. + * @white_point.y: Y coordinate of whitepoint of color primary. 
*/ struct { __u16 x, y; @@ -957,6 +965,15 @@ struct hdr_output_metadata { * Request that the page-flip is performed as soon as possible, ie. with no * delay due to waiting for vblank. This may cause tearing to be visible on * the screen. + * + * When used with atomic uAPI, the driver will return an error if the hardware + * doesn't support performing an asynchronous page-flip for this update. + * User-space should handle this, e.g. by falling back to a regular page-flip. + * + * Note, some hardware might need to perform one last synchronous page-flip + * before being able to switch to asynchronous page-flips. As an exception, + * the driver will return success even though that first page-flip is not + * asynchronous. */ #define DRM_MODE_PAGE_FLIP_ASYNC 0x02 #define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4 @@ -1032,13 +1049,25 @@ struct drm_mode_crtc_page_flip_target { __u64 user_data; }; -/* create a dumb scanout buffer */ +/** + * struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout. + * @height: buffer height in pixels + * @width: buffer width in pixels + * @bpp: bits per pixel + * @flags: must be zero + * @handle: buffer object handle + * @pitch: number of bytes between two consecutive lines + * @size: size of the whole buffer in bytes + * + * User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds, + * the kernel fills @handle, @pitch and @size. + */ struct drm_mode_create_dumb { __u32 height; __u32 width; __u32 bpp; __u32 flags; - /* handle, pitch, size will be returned */ + __u32 handle; __u32 pitch; __u64 size; @@ -1311,6 +1340,16 @@ struct drm_mode_rect { __s32 y2; }; +/** + * struct drm_mode_closefb + * @fb_id: Framebuffer ID. + * @pad: Must be zero. + */ +struct drm_mode_closefb { + __u32 fb_id; + __u32 pad; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/habanalabs_accel.h b/include/uapi/drm/habanalabs_accel.h index e6436f3e8ea6..a512dc4cffd0 100644 --- a/include/uapi/drm/habanalabs_accel.h +++ b/include/uapi/drm/habanalabs_accel.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note * - * Copyright 2016-2022 HabanaLabs, Ltd. + * Copyright 2016-2023 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -8,8 +8,7 @@ #ifndef HABANALABS_H_ #define HABANALABS_H_ -#include <linux/types.h> -#include <linux/ioctl.h> +#include <drm/drm.h> /* * Defines that are asic-specific but constitutes as ABI between kernel driver @@ -607,9 +606,9 @@ enum gaudi2_engine_id { /* * ASIC specific PLL index * - * Used to retrieve in frequency info of different IPs via - * HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be - * used as an index in struct hl_pll_frequency_info + * Used to retrieve frequency info of different IPs via HL_INFO_PLL_FREQUENCY under + * DRM_IOCTL_HL_INFO IOCTL. + * The enums need to be used as an index in struct hl_pll_frequency_info. */ enum hl_goya_pll_index { @@ -809,6 +808,7 @@ enum hl_server_type { * HL_INFO_FW_ERR_EVENT - Retrieve information on the reported FW error. * May return 0 even though no new data is available, in that case * timestamp will be 0. + * HL_INFO_USER_ENGINE_ERR_EVENT - Retrieve the last engine id that reported an error.
*/ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -845,6 +845,8 @@ enum hl_server_type { #define HL_INFO_FW_GENERIC_REQ 35 #define HL_INFO_HW_ERR_EVENT 36 #define HL_INFO_FW_ERR_EVENT 37 +#define HL_INFO_USER_ENGINE_ERR_EVENT 38 +#define HL_INFO_DEV_SIGNED 40 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -884,11 +886,11 @@ enum hl_server_type { * @dram_enabled: Whether the DRAM is enabled. * @security_enabled: Whether security is enabled on device. * @mme_master_slave_mode: Indicate whether the MME is working in master/slave - * configuration. Relevant for Greco and later. + * configuration. Relevant for Gaudi2 and later. * @cpucp_version: The CPUCP f/w version. * @card_name: The card name as passed by the f/w. * @tpc_enabled_mask_ext: Bit-mask that represents which TPCs are enabled. - * Relevant for Greco and later. + * Relevant for Gaudi2 and later. * @dram_page_size: The DRAM physical page size. * @edma_enabled_mask: Bit-mask that represents which EDMAs are enabled. * Relevant for Gaudi2 and later. @@ -990,6 +992,7 @@ struct hl_info_reset_count { struct hl_info_time_sync { __u64 device_time; __u64 host_time; + __u64 tsc_time; }; /** @@ -1227,6 +1230,20 @@ struct hl_info_fw_err_event { }; /** + * struct hl_info_engine_err_event - engine error info + * @timestamp: time-stamp of error occurrence + * @engine_id: engine id that reported the error. + * @error_count: Number of errors reported. + * @pad: size padding for u64 granularity. + */ +struct hl_info_engine_err_event { + __s64 timestamp; + __u16 engine_id; + __u16 error_count; + __u32 pad; +}; + +/** * struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information. * @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size * (e.g. 0x2100000 means that 1MB and 32MB pages are supported). @@ -1240,6 +1257,7 @@ struct hl_info_dev_memalloc_page_sizes { #define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */ #define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */ #define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */ +#define SEC_DEV_INFO_BUF_SZ 5120 /* * struct hl_info_sec_attest - attestation report of the boot @@ -1274,6 +1292,32 @@ struct hl_info_sec_attest { __u8 pad0[2]; }; +/* + * struct hl_info_signed - device information signed by a secured device. + * @nonce: number used only once; a random number provided by the host. It is also passed to the quote + * command as qualifying data. + * @pub_data_len: length of the public data (bytes) + * @certificate_len: length of the certificate (bytes) + * @info_sig_len: length of the attestation signature (bytes) + * @public_data: public key info signed info data (outPublic + name + qualifiedName) + * @certificate: certificate for the signing key + * @info_sig: signature of the info + nonce data. + * @dev_info_len: length of device info (bytes) + * @dev_info: device info as byte array. + */ +struct hl_info_signed { + __u32 nonce; + __u16 pub_data_len; + __u16 certificate_len; + __u8 info_sig_len; + __u8 public_data[SEC_PUB_DATA_BUF_SZ]; + __u8 certificate[SEC_CERTIFICATE_BUF_SZ]; + __u8 info_sig[SEC_SIGNATURE_BUF_SZ]; + __u16 dev_info_len; + __u8 dev_info[SEC_DEV_INFO_BUF_SZ]; + __u8 pad[2]; +}; + /** * struct hl_page_fault_info - page fault information. * @timestamp: timestamp of page fault.
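The explicit @pad member in struct hl_info_engine_err_event above keeps the struct at an exact multiple of 64 bits with no compiler-inserted holes, which matters for a UAPI shared between 32-bit and 64-bit userspace. A compile-time sketch of that layout rule, mirroring the UAPI fields with standard types (the mirror struct and the assertion are illustrative, not part of the header):

#include <stdint.h>

struct engine_err_event {
	int64_t  timestamp;   /* __s64 in the UAPI header */
	uint16_t engine_id;   /* __u16 */
	uint16_t error_count; /* __u16 */
	uint32_t pad;         /* explicit padding up to u64 granularity */
};

/* 8 + 2 + 2 + 4 = 16 bytes: exactly two 64-bit words, no holes. */
_Static_assert(sizeof(struct engine_err_event) == 16,
	       "layout must match the __s64/__u16/__u16/__u32 UAPI struct");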
@@ -1409,7 +1453,7 @@ union hl_cb_args { * * HL_CS_CHUNK_FLAGS_USER_ALLOC_CB: * Indicates if the CB was allocated and mapped by userspace - * (relevant to greco and above). User allocated CB is a command buffer, + * (relevant to Gaudi2 and later). User allocated CB is a command buffer, * allocated by the user, via malloc (or similar). After allocating the * CB, the user invokes - “memory ioctl” to map the user memory into a * device virtual address. The user provides this address via the @@ -1434,7 +1478,7 @@ struct hl_cs_chunk { * a DRAM address of the internal CB. In Gaudi, this might also * represent a mapped host address of the CB. * - * Greco onwards: + * Gaudi2 onwards: * For H/W queue, this represents either a Handle of CB on the * Host, or an SRAM, a DRAM, or a mapped host address of the CB. * @@ -2147,6 +2191,13 @@ struct hl_debug_args { __u32 ctx_id; }; +#define HL_IOCTL_INFO 0x00 +#define HL_IOCTL_CB 0x01 +#define HL_IOCTL_CS 0x02 +#define HL_IOCTL_WAIT_CS 0x03 +#define HL_IOCTL_MEMORY 0x04 +#define HL_IOCTL_DEBUG 0x05 + /* * Various information operations such as: * - H/W IP information @@ -2161,8 +2212,7 @@ struct hl_debug_args { * definitions of structures in kernel and userspace, e.g. in case of old * userspace and new kernel driver */ -#define HL_IOCTL_INFO \ - _IOWR('H', 0x01, struct hl_info_args) +#define DRM_IOCTL_HL_INFO DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_INFO, struct hl_info_args) /* * Command Buffer @@ -2183,8 +2233,7 @@ struct hl_debug_args { * and won't be returned to user. * */ -#define HL_IOCTL_CB \ - _IOWR('H', 0x02, union hl_cb_args) +#define DRM_IOCTL_HL_CB DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CB, union hl_cb_args) /* * Command Submission @@ -2206,7 +2255,7 @@ struct hl_debug_args { * internal. The driver will get completion notifications from the device only * on JOBS which are enqueued in the external queues. * - * Greco onwards: + * Gaudi2 onwards: * There is a single type of queue for all types of engines, either DMA engines * for transfers from/to the host or inside the device, or compute engines. * The driver will get completion notifications from the device for all queues. @@ -2236,8 +2285,7 @@ struct hl_debug_args { * and only if CS N and CS N-1 are exactly the same (same CBs for the same * queues). */ -#define HL_IOCTL_CS \ - _IOWR('H', 0x03, union hl_cs_args) +#define DRM_IOCTL_HL_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CS, union hl_cs_args) /* * Wait for Command Submission @@ -2269,9 +2317,7 @@ struct hl_debug_args { * HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the * device was reset (EIO) */ - -#define HL_IOCTL_WAIT_CS \ - _IOWR('H', 0x04, union hl_wait_cs_args) +#define DRM_IOCTL_HL_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_WAIT_CS, union hl_wait_cs_args) /* * Memory @@ -2288,8 +2334,7 @@ struct hl_debug_args { * There is an option for the user to specify the requested virtual address. * */ -#define HL_IOCTL_MEMORY \ - _IOWR('H', 0x05, union hl_mem_args) +#define DRM_IOCTL_HL_MEMORY DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_MEMORY, union hl_mem_args) /* * Debug @@ -2315,10 +2360,9 @@ struct hl_debug_args { * The driver can decide to "kick out" the user if he abuses this interface. 
* */ -#define HL_IOCTL_DEBUG \ - _IOWR('H', 0x06, struct hl_debug_args) +#define DRM_IOCTL_HL_DEBUG DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_DEBUG, struct hl_debug_args) -#define HL_COMMAND_START 0x01 -#define HL_COMMAND_END 0x07 +#define HL_COMMAND_START (DRM_COMMAND_BASE + HL_IOCTL_INFO) +#define HL_COMMAND_END (DRM_COMMAND_BASE + HL_IOCTL_DEBUG + 1) #endif /* HABANALABS_H_ */ diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 7000e5910a1d..fd4f9574d177 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -38,13 +38,13 @@ extern "C" { */ /** - * DOC: uevents generated by i915 on it's device node + * DOC: uevents generated by i915 on its device node * * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch - * event from the gpu l3 cache. Additional information supplied is ROW, + * event from the GPU L3 cache. Additional information supplied is ROW, * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep - * track of these events and if a specific cache-line seems to have a - * persistent error remap it with the l3 remapping tool supplied in + * track of these events, and if a specific cache-line seems to have a + * persistent error, remap it with the L3 remapping tool supplied in * intel-gpu-tools. The value supplied with the event is always 1. * * I915_ERROR_UEVENT - Generated upon error detection, currently only via @@ -693,7 +693,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_EXEC_FENCE 44 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture - * user specified bufffers for post-mortem debugging of GPU hangs. See + * user-specified buffers for post-mortem debugging of GPU hangs. See * EXEC_OBJECT_CAPTURE. */ #define I915_PARAM_HAS_EXEC_CAPTURE 45 @@ -1606,7 +1606,7 @@ struct drm_i915_gem_busy { * is accurate. * * The returned dword is split into two fields to indicate both - * the engine classess on which the object is being read, and the + * the engine classes on which the object is being read, and the * engine class on which it is currently being written (if any). * * The low word (bits 0:15) indicate if the object is being written @@ -1815,7 +1815,7 @@ struct drm_i915_gem_madvise { __u32 handle; /* Advice: either the buffer will be needed again in the near future, - * or wont be and could be discarded under memory pressure. + * or won't be and could be discarded under memory pressure. */ __u32 madv; @@ -3246,7 +3246,7 @@ struct drm_i915_query_topology_info { * // enough to hold our array of engines. The kernel will fill out the * // item.length for us, which is the number of bytes we need. * // - * // Alternatively a large buffer can be allocated straight away enabling + * // Alternatively a large buffer can be allocated straightaway enabling * // querying in one pass, in which case item.length should contain the * // length of the provided buffer. * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); @@ -3256,7 +3256,7 @@ struct drm_i915_query_topology_info { * // Now that we allocated the required number of bytes, we call the ioctl * // again, this time with the data_ptr pointing to our newly allocated * // blob, which the kernel can then populate with info on all engines. - * item.data_ptr = (uintptr_t)&info, + * item.data_ptr = (uintptr_t)&info; * * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query); * if (err) ... 
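The commented walk-through in the drm_i915_query_topology_info section above is the generic two-pass pattern behind DRM_IOCTL_I915_QUERY: the first call sizes the blob, the second fills it. A hedged, self-contained version of that pattern as a helper function, with error handling elided; query_id would be one of the DRM_I915_QUERY_* constants:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns a malloc'd blob holding the query result; *len receives its size. */
static void *i915_query_blob(int fd, uint64_t query_id, int32_t *len)
{
	struct drm_i915_query_item item = { .query_id = query_id };
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uint64_t)(uintptr_t)&item,
	};

	/* Pass 1: data_ptr is zero, so the kernel only fills item.length. */
	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);

	void *blob = calloc(1, (size_t)item.length);

	/* Pass 2: same item, now pointing at a buffer of the right size. */
	item.data_ptr = (uint64_t)(uintptr_t)blob;
	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);

	*len = item.length;
	return blob;
}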
@@ -3286,7 +3286,7 @@ struct drm_i915_query_topology_info { /** * struct drm_i915_engine_info * - * Describes one engine and it's capabilities as known to the driver. + * Describes one engine and its capabilities as known to the driver. */ struct drm_i915_engine_info { /** @engine: Engine class and instance. */ diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index a58a14c9f222..19a13468eca5 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -53,7 +53,7 @@ extern "C" { #define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3 #define DRM_IVPU_PARAM_NUM_CONTEXTS 4 #define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5 -#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 +#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 /* Deprecated */ #define DRM_IVPU_PARAM_CONTEXT_ID 7 #define DRM_IVPU_PARAM_FW_API_VERSION 8 #define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9 @@ -64,13 +64,32 @@ extern "C" { #define DRM_IVPU_PLATFORM_TYPE_SILICON 0 +/* Deprecated, use DRM_IVPU_JOB_PRIORITY */ #define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0 #define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1 #define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2 #define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3 -#define DRM_IVPU_CAP_METRIC_STREAMER 1 -#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2 +#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0 +#define DRM_IVPU_JOB_PRIORITY_IDLE 1 +#define DRM_IVPU_JOB_PRIORITY_NORMAL 2 +#define DRM_IVPU_JOB_PRIORITY_FOCUS 3 +#define DRM_IVPU_JOB_PRIORITY_REALTIME 4 + +/** + * DRM_IVPU_CAP_METRIC_STREAMER + * + * Metric streamer support. Provides sampling of various hardware performance + * metrics like DMA bandwidth and cache miss/hits. Can be used for profiling. + */ +#define DRM_IVPU_CAP_METRIC_STREAMER 1 +/** + * DRM_IVPU_CAP_DMA_MEMORY_RANGE + * + * Driver has capability to allocate separate memory range + * accessible by hardware DMA. + */ +#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2 /** * struct drm_ivpu_param - Get/Set VPU parameters @@ -100,10 +119,6 @@ struct drm_ivpu_param { * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS: * Lowest VPU virtual address available in the current context (read-only) * - * %DRM_IVPU_PARAM_CONTEXT_PRIORITY: - * Value of current context scheduling priority (read-write). - * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values. - * * %DRM_IVPU_PARAM_CONTEXT_ID: * Current context ID, always greater than 0 (read-only) * @@ -123,6 +138,8 @@ struct drm_ivpu_param { * %DRM_IVPU_PARAM_SKU: * VPU SKU ID (read-only) * + * %DRM_IVPU_PARAM_CAPABILITIES: + * Supported capabilities (read-only) */ __u32 param; @@ -182,7 +199,7 @@ struct drm_ivpu_bo_create { * * %DRM_IVPU_BO_UNCACHED: * - * Allocated BO will not be cached on host side nor snooped on the VPU side. + * Not supported. Use DRM_IVPU_BO_WC instead. * * %DRM_IVPU_BO_WC: * @@ -272,10 +289,23 @@ struct drm_ivpu_submit { * to be executed. The offset has to be 8-byte aligned. 
*/ __u32 commands_offset; + + /** + * @priority: + * + * Priority to be set for related job command queue, can be one of the following: + * %DRM_IVPU_JOB_PRIORITY_DEFAULT + * %DRM_IVPU_JOB_PRIORITY_IDLE + * %DRM_IVPU_JOB_PRIORITY_NORMAL + * %DRM_IVPU_JOB_PRIORITY_FOCUS + * %DRM_IVPU_JOB_PRIORITY_REALTIME + */ + __u32 priority; }; /* drm_ivpu_bo_wait job status codes */ #define DRM_IVPU_JOB_STATUS_SUCCESS 0 +#define DRM_IVPU_JOB_STATUS_ABORTED 256 /** * struct drm_ivpu_bo_wait - Wait for BO to become inactive diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 6c34272a13fd..d8a6b3472760 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -86,6 +86,7 @@ struct drm_msm_timespec { #define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */ #define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */ #define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */ +#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */ /* For backwards compat. The original support for preemption was based on * a single ring per priority level so # of priority levels equals the # @@ -139,6 +140,8 @@ struct drm_msm_gem_new { #define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */ #define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */ #define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */ +#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */ +#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */ struct drm_msm_gem_info { __u32 handle; /* in */ diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h index 8d7402c13e56..0bade1592f34 100644 --- a/include/uapi/drm/nouveau_drm.h +++ b/include/uapi/drm/nouveau_drm.h @@ -44,6 +44,16 @@ extern "C" { #define NOUVEAU_GETPARAM_PTIMER_TIME 14 #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 #define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16 + +/* + * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam + * + * Query the maximum amount of IBs that can be pushed through a single + * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC + * ioctl(). + */ +#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17 + struct drm_nouveau_getparam { __u64 param; __u64 value; diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h new file mode 100644 index 000000000000..ccf6c2112468 --- /dev/null +++ b/include/uapi/drm/pvr_drm.h @@ -0,0 +1,1295 @@ +/* SPDX-License-Identifier: (GPL-2.0-only WITH Linux-syscall-note) OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_DRM_UAPI_H +#define PVR_DRM_UAPI_H + +#include "drm.h" + +#include <linux/const.h> +#include <linux/types.h> + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * DOC: PowerVR UAPI + * + * The PowerVR IOCTL argument structs have a few limitations in place, in + * addition to the standard kernel restrictions: + * + * - All members must be type-aligned. + * - The overall struct must be padded to 64-bit alignment. + * - Explicit padding is almost always required. This takes the form of + * ``_padding_[x]`` members of sufficient size to pad to the next power-of-two + * alignment, where [x] is the offset into the struct in hexadecimal. Arrays + * are never used for alignment. Padding fields must be zeroed; this is + * always checked. + * - Unions may only appear as the last member of a struct. + * - Individual union members may grow in the future. 
The space between the + * end of a union member and the end of its containing union is considered + * "implicit padding" and must be zeroed. This is always checked. + * + * In addition to the IOCTL argument structs, the PowerVR UAPI makes use of + * DEV_QUERY argument structs. These are used to fetch information about the + * device and runtime. These structs are subject to the same rules set out + * above. + */ + +/** + * struct drm_pvr_obj_array - Container used to pass arrays of objects + * + * It is not unusual to have to extend objects to pass new parameters, and the DRM + * ioctl infrastructure supports that by padding ioctl arguments with zeros + * when the data passed by userspace is smaller than the struct defined in the + * drm_ioctl_desc, thus keeping things backward compatible. This type is just + * applying the same concepts to indirect objects passed through arrays referenced + * from the main ioctl arguments structure: the stride basically defines the size + * of the object passed by userspace, which allows the kernel driver to pad with + * zeros when it's smaller than the size of the object it expects. + * + * Use ``DRM_PVR_OBJ_ARRAY()`` to fill object array fields, unless you + * have a very good reason not to. + */ +struct drm_pvr_obj_array { + /** @stride: Stride of object struct. Used for versioning. */ + __u32 stride; + + /** @count: Number of objects in the array. */ + __u32 count; + + /** @array: User pointer to an array of objects. */ + __u64 array; +}; + +/** + * DRM_PVR_OBJ_ARRAY() - Helper macro for filling &struct drm_pvr_obj_array. + * @cnt: Number of elements pointed to by @ptr. + * @ptr: Pointer to start of a C array. + * + * Return: Literal of type &struct drm_pvr_obj_array. + */ +#define DRM_PVR_OBJ_ARRAY(cnt, ptr) \ + { .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) } + +/** + * DOC: PowerVR IOCTL interface + */ + +/** + * PVR_IOCTL() - Build a PowerVR IOCTL number + * @_ioctl: An incrementing id for this IOCTL. Added to %DRM_COMMAND_BASE. + * @_mode: Must be one of %DRM_IOR, %DRM_IOW or %DRM_IOWR. + * @_data: The type of the args struct passed by this IOCTL. + * + * The struct referred to by @_data must have a ``drm_pvr_ioctl_`` prefix and an + * ``_args`` suffix. They are therefore omitted from @_data. + * + * This should only be used to build the constants described below; it should + * never be used to call an IOCTL directly. + * + * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */ +#define PVR_IOCTL(_ioctl, _mode, _data) \ + _mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_##_data##_args) + +#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query) +#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo) +#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset) +#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context) +#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context) +#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map) +#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap) +#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context) +#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context) +#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list) +#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list) +#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset) +#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset) +#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs) + +/** + * DOC: PowerVR IOCTL DEV_QUERY interface + */ + +/** + * struct drm_pvr_dev_query_gpu_info - Container used to fetch information about + * the graphics processor. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_GPU_INFO_GET. + */ +struct drm_pvr_dev_query_gpu_info { + /** + * @gpu_id: GPU identifier. + * + * For all currently supported GPUs this is the BVNC encoded as a 64-bit + * value as follows: + * + * +--------+--------+--------+-------+ + * | 63..48 | 47..32 | 31..16 | 15..0 | + * +========+========+========+=======+ + * | B | V | N | C | + * +--------+--------+--------+-------+ + */ + __u64 gpu_id; + + /** + * @num_phantoms: Number of Phantoms present. + */ + __u32 num_phantoms; + + /** @_padding_c: Reserved. This field must be zeroed. */ + __u32 _padding_c; +}; + +/** + * struct drm_pvr_dev_query_runtime_info - Container used to fetch information + * about the graphics runtime. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET. + */ +struct drm_pvr_dev_query_runtime_info { + /** + * @free_list_min_pages: Minimum allowed free list size, + * in PM physical pages. + */ + __u64 free_list_min_pages; + + /** + * @free_list_max_pages: Maximum allowed free list size, + * in PM physical pages. + */ + __u64 free_list_max_pages; + + /** + * @common_store_alloc_region_size: Size of the Allocation + * Region within the Common Store used for coefficient and shared + * registers, in dwords. + */ + __u32 common_store_alloc_region_size; + + /** + * @common_store_partition_space_size: Size of the + * Partition Space within the Common Store for output buffers, in + * dwords. + */ + __u32 common_store_partition_space_size; + + /** + * @max_coeffs: Maximum coefficients, in dwords. + */ + __u32 max_coeffs; + + /** + * @cdm_max_local_mem_size_regs: Maximum amount of local + * memory available to a compute kernel, in dwords. + */ + __u32 cdm_max_local_mem_size_regs; +}; + +/** + * struct drm_pvr_dev_query_quirks - Container used to fetch information about + * hardware fixes for which the device may require support in the user mode + * driver. 
+ * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_QUIRKS_GET. + */ +struct drm_pvr_dev_query_quirks { + /** + * @quirks: A userspace address for the hardware quirks __u32 array. + * + * The first @musthave_count items in the list are quirks that the + * client must support for this device. If userspace does not support + * all these quirks then functionality is not guaranteed and client + * initialisation must fail. + * The remaining quirks in the list affect userspace and the kernel or + * firmware. They are disabled by default and require userspace to + * opt-in. The opt-in mechanism depends on the quirk. + */ + __u64 quirks; + + /** @count: Length of @quirks (number of __u32). */ + __u16 count; + + /** + * @musthave_count: The number of entries in @quirks that are + * mandatory, starting at index 0. + */ + __u16 musthave_count; + + /** @_padding_c: Reserved. This field must be zeroed. */ + __u32 _padding_c; +}; + +/** + * struct drm_pvr_dev_query_enhancements - Container used to fetch information + * about optional enhancements supported by the device that require support in + * the user mode driver. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_ENHANCEMENTS_GET. + */ +struct drm_pvr_dev_query_enhancements { + /** + * @enhancements: A userspace address for the hardware enhancements + * __u32 array. + * + * These enhancements affect userspace and the kernel or firmware. They + * are disabled by default and require userspace to opt-in. The opt-in + * mechanism depends on the enhancement. + */ + __u64 enhancements; + + /** @count: Length of @enhancements (number of __u32). */ + __u16 count; + + /** @_padding_a: Reserved. This field must be zeroed. */ + __u16 _padding_a; + + /** @_padding_c: Reserved. This field must be zeroed. */ + __u32 _padding_c; +}; + +/** + * enum drm_pvr_heap_id - Array index for heap info data returned by + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + * + * For compatibility reasons all indices will be present in the returned array, + * however some heaps may not be present. These are indicated where + * &struct drm_pvr_heap.size is set to zero. + */ +enum drm_pvr_heap_id { + /** @DRM_PVR_HEAP_GENERAL: General purpose heap. */ + DRM_PVR_HEAP_GENERAL = 0, + /** @DRM_PVR_HEAP_PDS_CODE_DATA: PDS code and data heap. */ + DRM_PVR_HEAP_PDS_CODE_DATA, + /** @DRM_PVR_HEAP_USC_CODE: USC code heap. */ + DRM_PVR_HEAP_USC_CODE, + /** @DRM_PVR_HEAP_RGNHDR: Region header heap. Only used if GPU has BRN63142. */ + DRM_PVR_HEAP_RGNHDR, + /** @DRM_PVR_HEAP_VIS_TEST: Visibility test heap. */ + DRM_PVR_HEAP_VIS_TEST, + /** @DRM_PVR_HEAP_TRANSFER_FRAG: Transfer fragment heap. */ + DRM_PVR_HEAP_TRANSFER_FRAG, + + /** + * @DRM_PVR_HEAP_COUNT: The number of heaps returned by + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + * + * More heaps may be added, so this also serves as the copy limit when + * sent by the caller. + */ + DRM_PVR_HEAP_COUNT + /* Please only add additional heaps above DRM_PVR_HEAP_COUNT! */ +}; + +/** + * struct drm_pvr_heap - Container holding information about a single heap. + * + * This will always be fetched as an array. + */ +struct drm_pvr_heap { + /** @base: Base address of heap. */ + __u64 base; + + /** @size: Size of heap, in bytes. Will be 0 if the heap is not present. */ + __u64 size; + + /** @flags: Flags for this heap. Currently always 0. */ + __u32 flags; + + /** @page_size_log2: Log2 of page size. 
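+	 * For example, a value of 12 means 4 KiB device pages.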
*/ + __u32 page_size_log2; +}; + +/** + * struct drm_pvr_dev_query_heap_info - Container used to fetch information + * about heaps supported by the device driver. + * + * Please note all driver-supported heaps will be returned up to &heaps.count. + * Some heaps will not be present in all devices, which will be indicated by + * &struct drm_pvr_heap.size being set to zero. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + */ +struct drm_pvr_dev_query_heap_info { + /** + * @heaps: Array of &struct drm_pvr_heap. If pointer is NULL, the count + * and stride will be updated with those known to the driver version, to + * facilitate allocation by the caller. + */ + struct drm_pvr_obj_array heaps; +}; + +/** + * enum drm_pvr_static_data_area_usage - Array index for static data area info + * returned by %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET. + * + * For compatibility reasons all indices will be present in the returned array, + * however some areas may not be present. These are indicated where + * &struct drm_pvr_static_data_area.size is set to zero. + */ +enum drm_pvr_static_data_area_usage { + /** + * @DRM_PVR_STATIC_DATA_AREA_EOT: End of Tile PDS program code segment. + * + * The End of Tile PDS task runs at completion of a tile during a fragment job, and is + * responsible for emitting the tile to the Pixel Back End. + */ + DRM_PVR_STATIC_DATA_AREA_EOT = 0, + + /** + * @DRM_PVR_STATIC_DATA_AREA_FENCE: MCU fence area, used during cache flush and + * invalidation. + * + * This must point to valid physical memory but the contents otherwise are not used. + */ + DRM_PVR_STATIC_DATA_AREA_FENCE, + + /** + * @DRM_PVR_STATIC_DATA_AREA_VDM_SYNC: VDM sync program. + * + * The VDM sync program is used to synchronise multiple areas of the GPU hardware. + */ + DRM_PVR_STATIC_DATA_AREA_VDM_SYNC, + + /** + * @DRM_PVR_STATIC_DATA_AREA_YUV_CSC: YUV coefficients. + * + * Area contains up to 16 slots with stride of 64 bytes. Each is a 3x4 matrix of u16 fixed + * point numbers, with 1 sign bit, 2 integer bits and 13 fractional bits. + * + * The slots are : + * 0 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR + * 1 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (full range) + * 2 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (conformant range) + * 3 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (full range) + * 4 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range) + * 5 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (full range) + * 6 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range) + * 7 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (full range) + * 8 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range) + * 9 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range, 10 bit) + * 10 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range, 10 bit) + * 11 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range, 10 bit) + * 14 = Identity (biased) + * 15 = Identity + */ + DRM_PVR_STATIC_DATA_AREA_YUV_CSC, +}; + +/** + * struct drm_pvr_static_data_area - Container holding information about a + * single static data area. + * + * This will always be fetched as an array. + */ +struct drm_pvr_static_data_area { + /** + * @area_usage: Usage of static data area. + * See &enum drm_pvr_static_data_area_usage. 
+	 */
+	__u16 area_usage;
+
+	/**
+	 * @location_heap_id: Array index of the heap where this static data
+	 * area is located. This array is fetched using
+	 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+	 */
+	__u16 location_heap_id;
+
+	/** @size: Size of static data area. Not present if set to zero. */
+	__u32 size;
+
+	/** @offset: Offset of static data area from start of heap. */
+	__u64 offset;
+};
+
+/**
+ * struct drm_pvr_dev_query_static_data_areas - Container used to fetch
+ * information about the static data areas in heaps supported by the device
+ * driver.
+ *
+ * Please note all driver-supported static data areas will be returned up to
+ * &static_data_areas.count. Some will not be present for all devices, which
+ * will be indicated by &struct drm_pvr_static_data_area.size being set to zero.
+ *
+ * Further, some heaps will not be present either. See &struct
+ * drm_pvr_dev_query_heap_info.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ */
+struct drm_pvr_dev_query_static_data_areas {
+	/**
+	 * @static_data_areas: Array of &struct drm_pvr_static_data_area. If
+	 * pointer is NULL, the count and stride will be updated with those
+	 * known to the driver version, to facilitate allocation by the caller.
+	 */
+	struct drm_pvr_obj_array static_data_areas;
+};
+
+/**
+ * enum drm_pvr_dev_query - For use with &drm_pvr_ioctl_dev_query_args.type to
+ * indicate the type of the receiving container.
+ *
+ * Append only. Do not reorder.
+ */
+enum drm_pvr_dev_query {
+	/**
+	 * @DRM_PVR_DEV_QUERY_GPU_INFO_GET: The dev query args contain a pointer
+	 * to &struct drm_pvr_dev_query_gpu_info.
+	 */
+	DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0,
+
+	/**
+	 * @DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: The dev query args contain a
+	 * pointer to &struct drm_pvr_dev_query_runtime_info.
+	 */
+	DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
+
+	/**
+	 * @DRM_PVR_DEV_QUERY_QUIRKS_GET: The dev query args contain a pointer
+	 * to &struct drm_pvr_dev_query_quirks.
+	 */
+	DRM_PVR_DEV_QUERY_QUIRKS_GET,
+
+	/**
+	 * @DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: The dev query args contain a
+	 * pointer to &struct drm_pvr_dev_query_enhancements.
+	 */
+	DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
+
+	/**
+	 * @DRM_PVR_DEV_QUERY_HEAP_INFO_GET: The dev query args contain a
+	 * pointer to &struct drm_pvr_dev_query_heap_info.
+	 */
+	DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
+
+	/**
+	 * @DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: The dev query args contain
+	 * a pointer to &struct drm_pvr_dev_query_static_data_areas.
+	 */
+	DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
+};
+
+/**
+ * struct drm_pvr_ioctl_dev_query_args - Arguments for %DRM_IOCTL_PVR_DEV_QUERY.
+ */
+struct drm_pvr_ioctl_dev_query_args {
+	/**
+	 * @type: Type of query and output struct. See &enum drm_pvr_dev_query.
+	 */
+	__u32 type;
+
+	/**
+	 * @size: Size of the receiving struct, see @type.
+	 *
+	 * After a successful call this will be updated to the written byte
+	 * length.
+	 * Can also be used to get the minimum byte length (see @pointer).
+	 * This allows additional fields to be appended to the structs in
+	 * future.
+	 */
+	__u32 size;
+
+	/**
+	 * @pointer: Pointer to struct @type.
+	 *
+	 * Must be large enough to contain @size bytes.
+	 * If pointer is NULL, the expected size will be returned in the @size
+	 * field, but no other data will be written.
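+	 *
+	 * A minimal two-pass sketch (illustrative userspace code, assuming
+	 * ``fd`` is an open PowerVR DRM file descriptor):
+	 *
+	 * .. code-block:: C
+	 *
+	 *    struct drm_pvr_dev_query_gpu_info info = {0};
+	 *    struct drm_pvr_ioctl_dev_query_args args = {
+	 *            .type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
+	 *    };
+	 *
+	 *    // First pass: .pointer is NULL, so only .size is written back.
+	 *    ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
+	 *
+	 *    // Second pass: .size now holds the expected length; fetch the data.
+	 *    args.pointer = (__u64)(uintptr_t)&info;
+	 *    ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);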
+ */ + __u64 pointer; +}; + +/** + * DOC: PowerVR IOCTL CREATE_BO interface + */ + +/** + * DOC: Flags for CREATE_BO + * + * We use "device" to refer to the GPU here because of the ambiguity between CPU and GPU in some + * fonts. + * + * Device mapping options + * :DRM_PVR_BO_BYPASS_DEVICE_CACHE: Specify that device accesses to this memory will bypass the + * cache. This is used for buffers that will either be regularly updated by the CPU (eg free + * lists) or will be accessed only once and therefore isn't worth caching (eg partial render + * buffers). + * By default, the device flushes its memory caches after every job, so this is not normally + * required for coherency. + * :DRM_PVR_BO_PM_FW_PROTECT: Specify that only the Parameter Manager (PM) and/or firmware + * processor should be allowed to access this memory when mapped to the device. It is not + * valid to specify this flag with DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS. + * + * CPU mapping options + * :DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS: Allow userspace to map and access the contents of this + * memory. It is not valid to specify this flag with DRM_PVR_BO_PM_FW_PROTECT. + */ +#define DRM_PVR_BO_BYPASS_DEVICE_CACHE _BITULL(0) +#define DRM_PVR_BO_PM_FW_PROTECT _BITULL(1) +#define DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS _BITULL(2) +/* Bits 3..63 are reserved. */ + +#define DRM_PVR_BO_FLAGS_MASK (DRM_PVR_BO_BYPASS_DEVICE_CACHE | DRM_PVR_BO_PM_FW_PROTECT | \ + DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS) + +/** + * struct drm_pvr_ioctl_create_bo_args - Arguments for %DRM_IOCTL_PVR_CREATE_BO + */ +struct drm_pvr_ioctl_create_bo_args { + /** + * @size: [IN] Size of buffer object to create. This must be page size + * aligned. + */ + __u64 size; + + /** + * @handle: [OUT] GEM handle of the new buffer object for use in + * userspace. + */ + __u32 handle; + + /** @_padding_c: Reserved. This field must be zeroed. */ + __u32 _padding_c; + + /** + * @flags: [IN] Options which will affect the behaviour of this + * creation operation and future mapping operations on the created + * object. This field must be a valid combination of ``DRM_PVR_BO_*`` + * values, with all bits marked as reserved set to zero. + */ + __u64 flags; +}; + +/** + * DOC: PowerVR IOCTL GET_BO_MMAP_OFFSET interface + */ + +/** + * struct drm_pvr_ioctl_get_bo_mmap_offset_args - Arguments for + * %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET + * + * Like other DRM drivers, the "mmap" IOCTL doesn't actually map any memory. + * Instead, it allocates a fake offset which refers to the specified buffer + * object. This offset can be used with a real mmap call on the DRM device + * itself. + */ +struct drm_pvr_ioctl_get_bo_mmap_offset_args { + /** @handle: [IN] GEM handle of the buffer object to be mapped. */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; + + /** @offset: [OUT] Fake offset to use in the real mmap call. */ + __u64 offset; +}; + +/** + * DOC: PowerVR IOCTL CREATE_VM_CONTEXT and DESTROY_VM_CONTEXT interfaces + */ + +/** + * struct drm_pvr_ioctl_create_vm_context_args - Arguments for + * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT + */ +struct drm_pvr_ioctl_create_vm_context_args { + /** @handle: [OUT] Handle for new VM context. */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * struct drm_pvr_ioctl_destroy_vm_context_args - Arguments for + * %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT + */ +struct drm_pvr_ioctl_destroy_vm_context_args { + /** + * @handle: [IN] Handle for VM context to be destroyed. 
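+	 * This should be a handle previously returned by
+	 * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.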
+ */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * DOC: PowerVR IOCTL VM_MAP and VM_UNMAP interfaces + * + * The VM UAPI allows userspace to create buffer object mappings in GPU virtual address space. + * + * The client is responsible for managing GPU address space. It should allocate mappings within + * the heaps returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + * + * %DRM_IOCTL_PVR_VM_MAP creates a new mapping. The client provides the target virtual address for + * the mapping. Size and offset within the mapped buffer object can be specified, so the client can + * partially map a buffer. + * + * %DRM_IOCTL_PVR_VM_UNMAP removes a mapping. The entire mapping will be removed from GPU address + * space only if the size of the mapping matches that known to the driver. + */ + +/** + * struct drm_pvr_ioctl_vm_map_args - Arguments for %DRM_IOCTL_PVR_VM_MAP. + */ +struct drm_pvr_ioctl_vm_map_args { + /** + * @vm_context_handle: [IN] Handle for VM context for this mapping to + * exist in. + */ + __u32 vm_context_handle; + + /** @flags: [IN] Flags which affect this mapping. Currently always 0. */ + __u32 flags; + + /** + * @device_addr: [IN] Requested device-virtual address for the mapping. + * This must be non-zero and aligned to the device page size for the + * heap containing the requested address. It is an error to specify an + * address which is not contained within one of the heaps returned by + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + */ + __u64 device_addr; + + /** + * @handle: [IN] Handle of the target buffer object. This must be a + * valid handle returned by %DRM_IOCTL_PVR_CREATE_BO. + */ + __u32 handle; + + /** @_padding_14: Reserved. This field must be zeroed. */ + __u32 _padding_14; + + /** + * @offset: [IN] Offset into the target bo from which to begin the + * mapping. + */ + __u64 offset; + + /** + * @size: [IN] Size of the requested mapping. Must be aligned to + * the device page size for the heap containing the requested address, + * as well as the host page size. When added to @device_addr, the + * result must not overflow the heap which contains @device_addr (i.e. + * the range specified by @device_addr and @size must be completely + * contained within a single heap specified by + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET). + */ + __u64 size; +}; + +/** + * struct drm_pvr_ioctl_vm_unmap_args - Arguments for %DRM_IOCTL_PVR_VM_UNMAP. + */ +struct drm_pvr_ioctl_vm_unmap_args { + /** + * @vm_context_handle: [IN] Handle for VM context that this mapping + * exists in. + */ + __u32 vm_context_handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; + + /** + * @device_addr: [IN] Device-virtual address at the start of the target + * mapping. This must be non-zero. + */ + __u64 device_addr; + + /** + * @size: Size in bytes of the target mapping. This must be non-zero. + */ + __u64 size; +}; + +/** + * DOC: PowerVR IOCTL CREATE_CONTEXT and DESTROY_CONTEXT interfaces + */ + +/** + * enum drm_pvr_ctx_priority - Arguments for + * &drm_pvr_ioctl_create_context_args.priority + */ +enum drm_pvr_ctx_priority { + /** @DRM_PVR_CTX_PRIORITY_LOW: Priority below normal. */ + DRM_PVR_CTX_PRIORITY_LOW = -512, + + /** @DRM_PVR_CTX_PRIORITY_NORMAL: Normal priority. */ + DRM_PVR_CTX_PRIORITY_NORMAL = 0, + + /** + * @DRM_PVR_CTX_PRIORITY_HIGH: Priority above normal. + * Note this requires ``CAP_SYS_NICE`` or ``DRM_MASTER``. 
+ */ + DRM_PVR_CTX_PRIORITY_HIGH = 512, +}; + +/** + * enum drm_pvr_ctx_type - Arguments for + * &struct drm_pvr_ioctl_create_context_args.type + */ +enum drm_pvr_ctx_type { + /** + * @DRM_PVR_CTX_TYPE_RENDER: Render context. + */ + DRM_PVR_CTX_TYPE_RENDER = 0, + + /** + * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context. + */ + DRM_PVR_CTX_TYPE_COMPUTE, + + /** + * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data + * master. + */ + DRM_PVR_CTX_TYPE_TRANSFER_FRAG, +}; + +/** + * struct drm_pvr_ioctl_create_context_args - Arguments for + * %DRM_IOCTL_PVR_CREATE_CONTEXT + */ +struct drm_pvr_ioctl_create_context_args { + /** + * @type: [IN] Type of context to create. + * + * This must be one of the values defined by &enum drm_pvr_ctx_type. + */ + __u32 type; + + /** @flags: [IN] Flags for context. */ + __u32 flags; + + /** + * @priority: [IN] Priority of new context. + * + * This must be one of the values defined by &enum drm_pvr_ctx_priority. + */ + __s32 priority; + + /** @handle: [OUT] Handle for new context. */ + __u32 handle; + + /** + * @static_context_state: [IN] Pointer to static context state stream. + */ + __u64 static_context_state; + + /** + * @static_context_state_len: [IN] Length of static context state, in bytes. + */ + __u32 static_context_state_len; + + /** + * @vm_context_handle: [IN] Handle for VM context that this context is + * associated with. + */ + __u32 vm_context_handle; + + /** + * @callstack_addr: [IN] Address for initial call stack pointer. Only valid + * if @type is %DRM_PVR_CTX_TYPE_RENDER, otherwise must be 0. + */ + __u64 callstack_addr; +}; + +/** + * struct drm_pvr_ioctl_destroy_context_args - Arguments for + * %DRM_IOCTL_PVR_DESTROY_CONTEXT + */ +struct drm_pvr_ioctl_destroy_context_args { + /** + * @handle: [IN] Handle for context to be destroyed. + */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * DOC: PowerVR IOCTL CREATE_FREE_LIST and DESTROY_FREE_LIST interfaces + */ + +/** + * struct drm_pvr_ioctl_create_free_list_args - Arguments for + * %DRM_IOCTL_PVR_CREATE_FREE_LIST + * + * Free list arguments have the following constraints : + * + * - @max_num_pages must be greater than zero. + * - @grow_threshold must be between 0 and 100. + * - @grow_num_pages must be less than or equal to &max_num_pages. + * - @initial_num_pages, @max_num_pages and @grow_num_pages must be multiples + * of 4. + * - When &grow_num_pages is 0, @initial_num_pages must be equal to + * @max_num_pages. + * - When &grow_num_pages is non-zero, @initial_num_pages must be less than + * @max_num_pages. + */ +struct drm_pvr_ioctl_create_free_list_args { + /** + * @free_list_gpu_addr: [IN] Address of GPU mapping of buffer object + * containing memory to be used by free list. + * + * The mapped region of the buffer object must be at least + * @max_num_pages * ``sizeof(__u32)``. + * + * The buffer object must have been created with + * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT set and + * %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS not set. + */ + __u64 free_list_gpu_addr; + + /** @initial_num_pages: [IN] Pages initially allocated to free list. */ + __u32 initial_num_pages; + + /** @max_num_pages: [IN] Maximum number of pages in free list. */ + __u32 max_num_pages; + + /** @grow_num_pages: [IN] Pages to grow free list by per request. */ + __u32 grow_num_pages; + + /** + * @grow_threshold: [IN] Percentage of FL memory used that should + * trigger a new grow request. 
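+	 * For example, a value of 75 requests a grow once 75% of the free
+	 * list pages are in use.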
+ */ + __u32 grow_threshold; + + /** + * @vm_context_handle: [IN] Handle for VM context that the free list buffer + * object is mapped in. + */ + __u32 vm_context_handle; + + /** + * @handle: [OUT] Handle for created free list. + */ + __u32 handle; +}; + +/** + * struct drm_pvr_ioctl_destroy_free_list_args - Arguments for + * %DRM_IOCTL_PVR_DESTROY_FREE_LIST + */ +struct drm_pvr_ioctl_destroy_free_list_args { + /** + * @handle: [IN] Handle for free list to be destroyed. + */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * DOC: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces + */ + +/** + * struct drm_pvr_create_hwrt_geom_data_args - Geometry data arguments used for + * &struct drm_pvr_ioctl_create_hwrt_dataset_args.geom_data_args. + */ +struct drm_pvr_create_hwrt_geom_data_args { + /** @tpc_dev_addr: [IN] Tail pointer cache GPU virtual address. */ + __u64 tpc_dev_addr; + + /** @tpc_size: [IN] Size of TPC, in bytes. */ + __u32 tpc_size; + + /** @tpc_stride: [IN] Stride between layers in TPC, in pages */ + __u32 tpc_stride; + + /** @vheap_table_dev_addr: [IN] VHEAP table GPU virtual address. */ + __u64 vheap_table_dev_addr; + + /** @rtc_dev_addr: [IN] Render Target Cache virtual address. */ + __u64 rtc_dev_addr; +}; + +/** + * struct drm_pvr_create_hwrt_rt_data_args - Render target arguments used for + * &struct drm_pvr_ioctl_create_hwrt_dataset_args.rt_data_args. + */ +struct drm_pvr_create_hwrt_rt_data_args { + /** @pm_mlist_dev_addr: [IN] PM MLIST GPU virtual address. */ + __u64 pm_mlist_dev_addr; + + /** @macrotile_array_dev_addr: [IN] Macrotile array GPU virtual address. */ + __u64 macrotile_array_dev_addr; + + /** @region_header_dev_addr: [IN] Region header array GPU virtual address. */ + __u64 region_header_dev_addr; +}; + +#define PVR_DRM_HWRT_FREE_LIST_LOCAL 0 +#define PVR_DRM_HWRT_FREE_LIST_GLOBAL 1U + +/** + * struct drm_pvr_ioctl_create_hwrt_dataset_args - Arguments for + * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET + */ +struct drm_pvr_ioctl_create_hwrt_dataset_args { + /** @geom_data_args: [IN] Geometry data arguments. */ + struct drm_pvr_create_hwrt_geom_data_args geom_data_args; + + /** + * @rt_data_args: [IN] Array of render target arguments. + * + * Each entry in this array represents a render target in a double buffered + * setup. + */ + struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2]; + + /** + * @free_list_handles: [IN] Array of free list handles. + * + * free_list_handles[PVR_DRM_HWRT_FREE_LIST_LOCAL] must have initial + * size of at least that reported by + * &drm_pvr_dev_query_runtime_info.free_list_min_pages. + */ + __u32 free_list_handles[2]; + + /** @width: [IN] Width in pixels. */ + __u32 width; + + /** @height: [IN] Height in pixels. */ + __u32 height; + + /** @samples: [IN] Number of samples. */ + __u32 samples; + + /** @layers: [IN] Number of layers. */ + __u32 layers; + + /** @isp_merge_lower_x: [IN] Lower X coefficient for triangle merging. */ + __u32 isp_merge_lower_x; + + /** @isp_merge_lower_y: [IN] Lower Y coefficient for triangle merging. */ + __u32 isp_merge_lower_y; + + /** @isp_merge_scale_x: [IN] Scale X coefficient for triangle merging. */ + __u32 isp_merge_scale_x; + + /** @isp_merge_scale_y: [IN] Scale Y coefficient for triangle merging. */ + __u32 isp_merge_scale_y; + + /** @isp_merge_upper_x: [IN] Upper X coefficient for triangle merging. */ + __u32 isp_merge_upper_x; + + /** @isp_merge_upper_y: [IN] Upper Y coefficient for triangle merging. 
*/ + __u32 isp_merge_upper_y; + + /** + * @region_header_size: [IN] Size of region header array. This common field is used by + * both render targets in this data set. + * + * The units for this field differ depending on what version of the simple internal + * parameter format the device uses. If format 2 is in use then this is interpreted as the + * number of region headers. For other formats it is interpreted as the size in dwords. + */ + __u32 region_header_size; + + /** + * @handle: [OUT] Handle for created HWRT dataset. + */ + __u32 handle; +}; + +/** + * struct drm_pvr_ioctl_destroy_hwrt_dataset_args - Arguments for + * %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET + */ +struct drm_pvr_ioctl_destroy_hwrt_dataset_args { + /** + * @handle: [IN] Handle for HWRT dataset to be destroyed. + */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * DOC: PowerVR IOCTL SUBMIT_JOBS interface + */ + +/** + * DOC: Flags for the drm_pvr_sync_op object. + * + * .. c:macro:: DRM_PVR_SYNC_OP_HANDLE_TYPE_MASK + * + * Handle type mask for the drm_pvr_sync_op::flags field. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ + * + * Indicates the handle passed in drm_pvr_sync_op::handle is a syncobj handle. + * This is the default type. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ + * + * Indicates the handle passed in drm_pvr_sync_op::handle is a timeline syncobj handle. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_SIGNAL + * + * Signal operation requested. The out-fence bound to the job will be attached to + * the syncobj whose handle is passed in drm_pvr_sync_op::handle. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_WAIT + * + * Wait operation requested. The job will wait for this particular syncobj or syncobj + * point to be signaled before being started. + * This is the default operation. + */ +#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf +#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0 +#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1 +#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31) +#define DRM_PVR_SYNC_OP_FLAG_WAIT 0 + +#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | \ + DRM_PVR_SYNC_OP_FLAG_SIGNAL) + +/** + * struct drm_pvr_sync_op - Object describing a sync operation + */ +struct drm_pvr_sync_op { + /** @handle: Handle of sync object. */ + __u32 handle; + + /** @flags: Combination of ``DRM_PVR_SYNC_OP_FLAG_`` flags. */ + __u32 flags; + + /** @value: Timeline value for this drm_syncobj. MBZ for a binary syncobj. */ + __u64 value; +}; + +/** + * DOC: Flags for SUBMIT_JOB ioctl geometry command. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST + * + * Indicates if this the first command to be issued for a render. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST + * + * Indicates if this the last command to be issued for a render. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE + * + * Forces to use single core in a multi core device. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK + * + * Logical OR of all the geometry cmd flags. + */ +#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0) +#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1) +#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2) +#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK \ + (DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST | \ + DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST | \ + DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE) + +/** + * DOC: Flags for SUBMIT_JOB ioctl fragment command. + * + * .. 
c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE + * + * Use single core in a multi core setup. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER + * + * Indicates whether a depth buffer is present. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER + * + * Indicates whether a stencil buffer is present. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP + * + * Disallow compute overlapped with this render. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS + * + * Indicates whether this render produces visibility results. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER + * + * Indicates whether partial renders write to a scratch buffer instead of + * the final surface. It also forces the full screen copy expected to be + * present on the last render after all partial renders have completed. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE + * + * Disable pixel merging for this render. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK + * + * Logical OR of all the fragment cmd flags. + */ +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER _BITULL(4) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER _BITULL(6) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE _BITULL(7) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK \ + (DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE) + +/** + * DOC: Flags for SUBMIT_JOB ioctl compute command. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP + * + * Disallow other jobs overlapped with this compute. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE + * + * Forces to use single core in a multi core device. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK + * + * Logical OR of all the compute cmd flags. + */ +#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0) +#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1) +#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK \ + (DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | \ + DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE) + +/** + * DOC: Flags for SUBMIT_JOB ioctl transfer command. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE + * + * Forces job to use a single core in a multi core device. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK + * + * Logical OR of all the transfer cmd flags. + */ +#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0) + +#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK \ + DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE + +/** + * enum drm_pvr_job_type - Arguments for &struct drm_pvr_job.job_type + */ +enum drm_pvr_job_type { + /** @DRM_PVR_JOB_TYPE_GEOMETRY: Job type is geometry. */ + DRM_PVR_JOB_TYPE_GEOMETRY = 0, + + /** @DRM_PVR_JOB_TYPE_FRAGMENT: Job type is fragment. 
*/
+	DRM_PVR_JOB_TYPE_FRAGMENT,
+
+	/** @DRM_PVR_JOB_TYPE_COMPUTE: Job type is compute. */
+	DRM_PVR_JOB_TYPE_COMPUTE,
+
+	/** @DRM_PVR_JOB_TYPE_TRANSFER_FRAG: Job type is a fragment transfer. */
+	DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_hwrt_data_ref - Reference HWRT data
+ */
+struct drm_pvr_hwrt_data_ref {
+	/** @set_handle: HWRT data set handle. */
+	__u32 set_handle;
+
+	/** @data_index: Index of the HWRT data inside the data set. */
+	__u32 data_index;
+};
+
+/**
+ * struct drm_pvr_job - Job arguments passed to the %DRM_IOCTL_PVR_SUBMIT_JOBS ioctl
+ */
+struct drm_pvr_job {
+	/**
+	 * @type: [IN] Type of job being submitted.
+	 *
+	 * This must be one of the values defined by &enum drm_pvr_job_type.
+	 */
+	__u32 type;
+
+	/**
+	 * @context_handle: [IN] Context handle.
+	 *
+	 * This must be a valid handle returned by %DRM_IOCTL_PVR_CREATE_CONTEXT.
+	 * The type of the context must be compatible with the type of job being
+	 * submitted (see &enum drm_pvr_job_type).
+	 */
+	__u32 context_handle;
+
+	/**
+	 * @flags: [IN] Flags for command.
+	 *
+	 * These are job-dependent. See all ``DRM_PVR_SUBMIT_JOB_*``.
+	 */
+	__u32 flags;
+
+	/**
+	 * @cmd_stream_len: [IN] Length of command stream, in bytes.
+	 */
+	__u32 cmd_stream_len;
+
+	/**
+	 * @cmd_stream: [IN] Pointer to command stream for command.
+	 *
+	 * The command stream must be u64-aligned.
+	 */
+	__u64 cmd_stream;
+
+	/** @sync_ops: [IN] Sync operations for this job. */
+	struct drm_pvr_obj_array sync_ops;
+
+	/**
+	 * @hwrt: [IN] HWRT data used by render jobs (geometry or fragment).
+	 *
+	 * Must be zero for non-render jobs.
+	 */
+	struct drm_pvr_hwrt_data_ref hwrt;
+};
+
+/**
+ * struct drm_pvr_ioctl_submit_jobs_args - Arguments for %DRM_IOCTL_PVR_SUBMIT_JOBS
+ *
+ * If the syscall returns an error, it is important to check the value of
+ * @jobs.count. This indicates the index into @jobs.array where the
+ * error occurred.
+ */
+struct drm_pvr_ioctl_submit_jobs_args {
+	/** @jobs: [IN] Array of jobs to submit. */
+	struct drm_pvr_obj_array jobs;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_DRM_UAPI_H */
diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h
index 2d348744a853..9dab32316aee 100644
--- a/include/uapi/drm/qaic_accel.h
+++ b/include/uapi/drm/qaic_accel.h
@@ -242,12 +242,12 @@ struct qaic_attach_slice_entry {
 * @dbc_id: In. Associate the sliced BO with this DBC.
 * @handle: In. GEM handle of the BO to slice.
 * @dir: In. Direction of data flow. 1 = DMA_TO_DEVICE, 2 = DMA_FROM_DEVICE
- * @size: In. Total length of the BO.
- *	  If BO is imported (DMABUF/PRIME) then this size
- *	  should not exceed the size of DMABUF provided.
- *	  If BO is allocated using DRM_IOCTL_QAIC_CREATE_BO
- *	  then this size should be exactly same as the size
- *	  provided during DRM_IOCTL_QAIC_CREATE_BO.
+ * @size: In. Total length of the BO being used. This must not exceed the
+ *	  base size of the BO (struct drm_gem_object.base).
+ *	  For BOs allocated using DRM_IOCTL_QAIC_CREATE_BO, the requested
+ *	  size is rounded up to a PAGE_SIZE multiple before allocation, so
+ *	  the allocated BO may be bigger than requested. @size must not
+ *	  exceed that PAGE_SIZE-aligned size.
 * @dev_addr: In. Device address this slice pushes to or pulls from.
 * @db_addr: In. Address of the doorbell to ring.
 * @db_data: In. Data to write to the doorbell.
@@ -287,8 +287,9 @@ struct qaic_execute_entry { * struct qaic_partial_execute_entry - Defines a BO to resize and submit. * @handle: In. GEM handle of the BO to commit to the device. * @dir: In. Direction of data. 1 = to device, 2 = from device. - * @resize: In. New size of the BO. Must be <= the original BO size. 0 is - * short for no resize. + * @resize: In. New size of the BO. Must be <= the original BO size. + * @resize as 0 would be interpreted as no DMA transfer is + * involved. */ struct qaic_partial_execute_entry { __u32 handle; @@ -372,6 +373,16 @@ struct qaic_perf_stats_entry { __u32 pad; }; +/** + * struct qaic_detach_slice - Detaches slicing configuration from BO. + * @handle: In. GEM handle of the BO to detach slicing configuration. + * @pad: Structure padding. Must be 0. + */ +struct qaic_detach_slice { + __u32 handle; + __u32 pad; +}; + #define DRM_QAIC_MANAGE 0x00 #define DRM_QAIC_CREATE_BO 0x01 #define DRM_QAIC_MMAP_BO 0x02 @@ -380,6 +391,7 @@ struct qaic_perf_stats_entry { #define DRM_QAIC_PARTIAL_EXECUTE_BO 0x05 #define DRM_QAIC_WAIT_BO 0x06 #define DRM_QAIC_PERF_STATS_BO 0x07 +#define DRM_QAIC_DETACH_SLICE_BO 0x08 #define DRM_IOCTL_QAIC_MANAGE DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MANAGE, struct qaic_manage_msg) #define DRM_IOCTL_QAIC_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_CREATE_BO, struct qaic_create_bo) @@ -389,6 +401,7 @@ struct qaic_perf_stats_entry { #define DRM_IOCTL_QAIC_PARTIAL_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_PARTIAL_EXECUTE_BO, struct qaic_execute) #define DRM_IOCTL_QAIC_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_WAIT_BO, struct qaic_wait) #define DRM_IOCTL_QAIC_PERF_STATS_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_PERF_STATS_BO, struct qaic_perf_stats) +#define DRM_IOCTL_QAIC_DETACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_DETACH_SLICE_BO, struct qaic_detach_slice) #if defined(__cplusplus) } diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 3dfc0af8756a..dce1835eced4 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -41,6 +41,7 @@ extern "C" { #define DRM_V3D_PERFMON_CREATE 0x08 #define DRM_V3D_PERFMON_DESTROY 0x09 #define DRM_V3D_PERFMON_GET_VALUES 0x0a +#define DRM_V3D_SUBMIT_CPU 0x0b #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -56,6 +57,7 @@ extern "C" { struct drm_v3d_perfmon_destroy) #define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \ struct drm_v3d_perfmon_get_values) +#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu) #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 #define DRM_V3D_SUBMIT_EXTENSION 0x02 @@ -69,7 +71,13 @@ extern "C" { struct drm_v3d_extension { __u64 next; __u32 id; -#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 +#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 +#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02 +#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03 +#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04 +#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05 +#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06 +#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07 __u32 flags; /* mbz */ }; @@ -93,6 +101,7 @@ enum v3d_queue { V3D_TFU, V3D_CSD, V3D_CACHE_CLEAN, + V3D_CPU, }; /** @@ -276,6 +285,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, DRM_V3D_PARAM_SUPPORTS_PERFMON, 
DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT, + DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, }; struct drm_v3d_get_param { @@ -319,6 +329,11 @@ struct drm_v3d_submit_tfu { /* Pointer to an array of ioctl extensions*/ __u64 extensions; + + struct { + __u32 ioc; + __u32 pad; + } v71; }; /* Submits a compute shader for dispatch. This job will block on any @@ -356,6 +371,234 @@ struct drm_v3d_submit_csd { __u32 pad; }; +/** + * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an + * indirect CSD + * + * When an extension of DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it + * points to this extension to define a indirect CSD submission. It creates a + * CPU job linked to a CSD job. The CPU job waits for the indirect CSD + * dependencies and, once they are signaled, it updates the CSD job config + * before allowing the CSD job execution. + */ +struct drm_v3d_indirect_csd { + struct drm_v3d_extension base; + + /* Indirect CSD */ + struct drm_v3d_submit_csd submit; + + /* Handle of the indirect BO, that should be also attached to the + * indirect CSD. + */ + __u32 indirect; + + /* Offset within the BO where the workgroup counts are stored */ + __u32 offset; + + /* Workgroups size */ + __u32 wg_size; + + /* Indices of the uniforms with the workgroup dispatch counts + * in the uniform stream. If the uniform rewrite is not needed, + * the offset must be 0xffffffff. + */ + __u32 wg_uniform_offsets[3]; +}; + +/** + * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate + * a timestamp query + * + * When an extension DRM_V3D_EXT_ID_TIMESTAMP_QUERY is defined, it points to + * this extension to define a timestamp query submission. This CPU job will + * calculate the timestamp query and update the query value within the + * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate + * query availability. + */ +struct drm_v3d_timestamp_query { + struct drm_v3d_extension base; + + /* Array of queries' offsets within the timestamp BO for their value */ + __u64 offsets; + + /* Array of timestamp's syncobjs to indicate its availability */ + __u64 syncs; + + /* Number of queries */ + __u32 count; + + /* mbz */ + __u32 pad; +}; + +/** + * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to + * reset timestamp queries + * + * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it + * points to this extension to define a reset timestamp submission. This CPU + * job will reset the timestamp queries based on value offset of the first + * query. Moreover, it will reset the timestamp syncobj to reset query + * availability. + */ +struct drm_v3d_reset_timestamp_query { + struct drm_v3d_extension base; + + /* Array of timestamp's syncobjs to indicate its availability */ + __u64 syncs; + + /* Offset of the first query within the timestamp BO for its value */ + __u32 offset; + + /* Number of queries */ + __u32 count; +}; + +/** + * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy + * query results to a buffer + * + * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it + * points to this extension to define a copy timestamp query submission. This + * CPU job will copy the timestamp queries results to a BO with the offset + * and stride defined in the extension. 
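+ *
+ * Illustratively (an assumption from the fields below, not ABI text): the
+ * result of query ``n`` lands at ``offset + n * stride`` in the destination
+ * BO, written as 32 or 64 bits according to ``do_64bit``.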
+ */ +struct drm_v3d_copy_timestamp_query { + struct drm_v3d_extension base; + + /* Define if should write to buffer using 64 or 32 bits */ + __u8 do_64bit; + + /* Define if it can write to buffer even if the query is not available */ + __u8 do_partial; + + /* Define if it should write availability bit to buffer */ + __u8 availability_bit; + + /* mbz */ + __u8 pad; + + /* Offset of the buffer in the BO */ + __u32 offset; + + /* Stride of the buffer in the BO */ + __u32 stride; + + /* Number of queries */ + __u32 count; + + /* Array of queries' offsets within the timestamp BO for their value */ + __u64 offsets; + + /* Array of timestamp's syncobjs to indicate its availability */ + __u64 syncs; +}; + +/** + * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to + * reset performance queries + * + * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it + * points to this extension to define a reset performance submission. This CPU + * job will reset the performance queries by resetting the values of the + * performance monitors. Moreover, it will reset the syncobj to reset query + * availability. + */ +struct drm_v3d_reset_performance_query { + struct drm_v3d_extension base; + + /* Array of performance queries's syncobjs to indicate its availability */ + __u64 syncs; + + /* Number of queries */ + __u32 count; + + /* Number of performance monitors */ + __u32 nperfmons; + + /* Array of u64 user-pointers that point to an array of kperfmon_ids */ + __u64 kperfmon_ids; +}; + +/** + * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy + * performance query results to a buffer + * + * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it + * points to this extension to define a copy performance query submission. This + * CPU job will copy the performance queries results to a BO with the offset + * and stride defined in the extension. + */ +struct drm_v3d_copy_performance_query { + struct drm_v3d_extension base; + + /* Define if should write to buffer using 64 or 32 bits */ + __u8 do_64bit; + + /* Define if it can write to buffer even if the query is not available */ + __u8 do_partial; + + /* Define if it should write availability bit to buffer */ + __u8 availability_bit; + + /* mbz */ + __u8 pad; + + /* Offset of the buffer in the BO */ + __u32 offset; + + /* Stride of the buffer in the BO */ + __u32 stride; + + /* Number of performance monitors */ + __u32 nperfmons; + + /* Number of performance counters related to this query pool */ + __u32 ncounters; + + /* Number of queries */ + __u32 count; + + /* Array of performance queries's syncobjs to indicate its availability */ + __u64 syncs; + + /* Array of u64 user-pointers that point to an array of kperfmon_ids */ + __u64 kperfmon_ids; +}; + +struct drm_v3d_submit_cpu { + /* Pointer to a u32 array of the BOs that are referenced by the job. + * + * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO, + * that contains the workgroup counts. + * + * For DRM_V3D_EXT_ID_TIMESTAMP_QUERY, it must contain only one BO, + * that will contain the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only + * one BO, that contains the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two + * BOs. The first is the BO where the timestamp queries will be written + * to. The second is the BO that contains the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no + * BOs. 
+ * + * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one + * BO, where the performance queries will be written. + */ + __u64 bo_handles; + + /* Number of BO handles passed in (size is that times 4). */ + __u32 bo_handle_count; + + __u32 flags; + + /* Pointer to an array of ioctl extensions*/ + __u64 extensions; +}; + enum { V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS, V3D_PERFCNT_FEP_VALID_PRIMS, diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index b1d0e56565bc..c2ce71987e9b 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -97,6 +97,7 @@ struct drm_virtgpu_execbuffer { #define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */ #define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */ #define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */ +#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */ struct drm_virtgpu_getparam { __u64 param; @@ -198,6 +199,7 @@ struct drm_virtgpu_resource_create_blob { #define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001 #define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002 #define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003 +#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004 struct drm_virtgpu_context_set_param { __u64 param; __u64 value; diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h new file mode 100644 index 000000000000..9fa3ae324731 --- /dev/null +++ b/include/uapi/drm/xe_drm.h @@ -0,0 +1,1347 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _UAPI_XE_DRM_H_ +#define _UAPI_XE_DRM_H_ + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * Please note that modifications to all structs defined here are + * subject to backwards-compatibility constraints. + * Sections in this file are organized as follows: + * 1. IOCTL definition + * 2. Extension definition and helper structs + * 3. IOCTL's Query structs in the order of the Query's entries. + * 4. The rest of IOCTL structs in the order of IOCTL declaration. + */ + +/** + * DOC: Xe Device Block Diagram + * + * The diagram below represents a high-level simplification of a discrete + * GPU supported by the Xe driver. It shows some device components which + * are necessary to understand this API, as well as how their relations + * to each other. This diagram does not represent real hardware:: + * + * ┌──────────────────────────────────────────────────────────────────┐ + * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │ + * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │ + * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │ + * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │ + * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │ + * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │ + * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │ + * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │ + * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... 
│ │ │ + * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │ + * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ + * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │ + * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ + * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │ + * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │ + * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │ + * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │ + * └─────────────────────────────Device0───────┬──────────────────────┘ + * │ + * ───────────────────────┴────────── PCI bus + */ + +/** + * DOC: Xe uAPI Overview + * + * This section aims to describe the Xe's IOCTL entries, its structs, and other + * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related + * entries and usage. + * + * List of supported IOCTLs: + * - &DRM_IOCTL_XE_DEVICE_QUERY + * - &DRM_IOCTL_XE_GEM_CREATE + * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET + * - &DRM_IOCTL_XE_VM_CREATE + * - &DRM_IOCTL_XE_VM_DESTROY + * - &DRM_IOCTL_XE_VM_BIND + * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE + * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY + * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY + * - &DRM_IOCTL_XE_EXEC + * - &DRM_IOCTL_XE_WAIT_USER_FENCE + */ + +/* + * xe specific ioctls. + * + * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie + * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset + * against DRM_COMMAND_BASE and should be between [0x0, 0x60). + */ +#define DRM_XE_DEVICE_QUERY 0x00 +#define DRM_XE_GEM_CREATE 0x01 +#define DRM_XE_GEM_MMAP_OFFSET 0x02 +#define DRM_XE_VM_CREATE 0x03 +#define DRM_XE_VM_DESTROY 0x04 +#define DRM_XE_VM_BIND 0x05 +#define DRM_XE_EXEC_QUEUE_CREATE 0x06 +#define DRM_XE_EXEC_QUEUE_DESTROY 0x07 +#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08 +#define DRM_XE_EXEC 0x09 +#define DRM_XE_WAIT_USER_FENCE 0x0a +/* Must be kept compact -- no holes */ + +#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query) +#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create) +#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset) +#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create) +#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy) +#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind) +#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create) +#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy) +#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property) +#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec) +#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence) + +/** + * DOC: Xe IOCTL Extensions + * + * Before detailing the IOCTLs and its structs, it is important to highlight + * that every 
IOCTL in Xe is extensible. + * + * Many interfaces need to grow over time. In most cases we can simply + * extend the struct and have userspace pass in more data. Another option, + * as demonstrated by Vulkan's approach to providing extensions for forward + * and backward compatibility, is to use a list of optional structs to + * provide those extra details. + * + * The key advantage to using an extension chain is that it allows us to + * redefine the interface more easily than an ever growing struct of + * increasing complexity, and for large parts of that interface to be + * entirely optional. The downside is more pointer chasing; chasing across + * the __user boundary with pointers encapsulated inside u64. + * + * Example chaining: + * + * .. code-block:: C + * + * struct drm_xe_user_extension ext3 { + * .next_extension = 0, // end + * .name = ..., + * }; + * struct drm_xe_user_extension ext2 { + * .next_extension = (uintptr_t)&ext3, + * .name = ..., + * }; + * struct drm_xe_user_extension ext1 { + * .next_extension = (uintptr_t)&ext2, + * .name = ..., + * }; + * + * Typically the struct drm_xe_user_extension would be embedded in some uAPI + * struct, and in this case we would feed it the head of the chain(i.e ext1), + * which would then apply all of the above extensions. +*/ + +/** + * struct drm_xe_user_extension - Base class for defining a chain of extensions + */ +struct drm_xe_user_extension { + /** + * @next_extension: + * + * Pointer to the next struct drm_xe_user_extension, or zero if the end. + */ + __u64 next_extension; + + /** + * @name: Name of the extension. + * + * Note that the name here is just some integer. + * + * Also note that the name space for this is not global for the whole + * driver, but rather its scope/meaning is limited to the specific piece + * of uAPI which has embedded the struct drm_xe_user_extension. + */ + __u32 name; + + /** + * @pad: MBZ + * + * All undefined bits must be zero. + */ + __u32 pad; +}; + +/** + * struct drm_xe_ext_set_property - Generic set property extension + * + * A generic struct that allows any of the Xe's IOCTL to be extended + * with a set_property operation. + */ +struct drm_xe_ext_set_property { + /** @base: base user extension */ + struct drm_xe_user_extension base; + + /** @property: property to set */ + __u32 property; + + /** @pad: MBZ */ + __u32 pad; + + /** @value: property value */ + __u64 value; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_engine_class_instance - instance of an engine class + * + * It is returned as part of the @drm_xe_engine, but it also is used as + * the input of engine selection for both @drm_xe_exec_queue_create and + * @drm_xe_query_engine_cycles + * + * The @engine_class can be: + * - %DRM_XE_ENGINE_CLASS_RENDER + * - %DRM_XE_ENGINE_CLASS_COPY + * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE + * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE + * - %DRM_XE_ENGINE_CLASS_COMPUTE + * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual + * hardware engine class). Used for creating ordered queues of VM + * bind operations. 
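+ *
+ * For example (illustrative only), ``{ .engine_class =
+ * DRM_XE_ENGINE_CLASS_RENDER, .engine_instance = 0, .gt_id = 0 }`` selects
+ * the first render engine on GT 0.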
+ */ +struct drm_xe_engine_class_instance { +#define DRM_XE_ENGINE_CLASS_RENDER 0 +#define DRM_XE_ENGINE_CLASS_COPY 1 +#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2 +#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3 +#define DRM_XE_ENGINE_CLASS_COMPUTE 4 +#define DRM_XE_ENGINE_CLASS_VM_BIND 5 + /** @engine_class: engine class id */ + __u16 engine_class; + /** @engine_instance: engine instance id */ + __u16 engine_instance; + /** @gt_id: Unique ID of this GT within the PCI Device */ + __u16 gt_id; + /** @pad: MBZ */ + __u16 pad; +}; + +/** + * struct drm_xe_engine - describe hardware engine + */ +struct drm_xe_engine { + /** @instance: The @drm_xe_engine_class_instance */ + struct drm_xe_engine_class_instance instance; + + /** @reserved: Reserved */ + __u64 reserved[3]; +}; + +/** + * struct drm_xe_query_engines - describe engines + * + * If a query is made with a struct @drm_xe_device_query where .query + * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses struct + * @drm_xe_query_engines in .data, which contains an array of struct + * @drm_xe_engine. + */ +struct drm_xe_query_engines { + /** @num_engines: number of engines returned in @engines */ + __u32 num_engines; + /** @pad: MBZ */ + __u32 pad; + /** @engines: The returned engines for this device */ + struct drm_xe_engine engines[]; +}; + +/** + * enum drm_xe_memory_class - Supported memory classes. + */ +enum drm_xe_memory_class { + /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */ + DRM_XE_MEM_REGION_CLASS_SYSMEM = 0, + /** + * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this + * represents the memory that is local to the device, which we + * call VRAM. Not valid on integrated platforms. + */ + DRM_XE_MEM_REGION_CLASS_VRAM +}; + +/** + * struct drm_xe_mem_region - Describes some region as known to + * the driver. + */ +struct drm_xe_mem_region { + /** + * @mem_class: The memory class describing this region. + * + * See enum drm_xe_memory_class for supported values. + */ + __u16 mem_class; + /** + * @instance: The unique ID for this region, which serves as the + * index in the placement bitmask used as argument for + * &DRM_IOCTL_XE_GEM_CREATE + */ + __u16 instance; + /** + * @min_page_size: Min page-size in bytes for this region. + * + * When the kernel allocates memory for this region, the + * underlying pages will be at least @min_page_size in size. + * Buffer objects with an allowable placement in this region must be + * created with a size aligned to this value. + * GPU virtual address mappings of (parts of) buffer objects that + * may be placed in this region must also have their GPU virtual + * address and range aligned to this value. + * Affected IOCTLs will return %-EINVAL if alignment restrictions are + * not met. + */ + __u32 min_page_size; + /** + * @total_size: The usable size in bytes for this region. + */ + __u64 total_size; + /** + * @used: Estimate of the memory used in bytes for this region. + * + * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable + * accounting. Without this the value here will always equal + * zero. + */ + __u64 used; + /** + * @cpu_visible_size: How much of this region can be CPU + * accessed, in bytes. + * + * This will always be <= @total_size, and the remainder (if + * any) will not be CPU accessible. If the CPU accessible part + * is smaller than @total_size then this is referred to as a + * small BAR system. + * + * On systems without small BAR (full BAR), @cpu_visible_size will + * always equal @total_size, since all of it will be CPU + * accessible.
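+ *
+ * For example, a sketch of how userspace might detect a small BAR
+ * region from a returned struct drm_xe_mem_region:
+ *
+ * .. code-block:: C
+ *
+ *     bool small_bar = region->mem_class == DRM_XE_MEM_REGION_CLASS_VRAM &&
+ *                      region->cpu_visible_size < region->total_size;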
+ * + * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM + * regions (for other types the value here will always equal + * zero). + */ + __u64 cpu_visible_size; + /** + * @cpu_visible_used: Estimate of CPU visible memory used, in + * bytes. + * + * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable + * accounting. Without this the value here will always equal + * zero. Note this is only currently tracked for + * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value + * here will always be zero). + */ + __u64 cpu_visible_used; + /** @reserved: Reserved */ + __u64 reserved[6]; +}; + +/** + * struct drm_xe_query_mem_regions - describe memory regions + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses + * struct drm_xe_query_mem_regions in .data. + */ +struct drm_xe_query_mem_regions { + /** @num_mem_regions: number of memory regions returned in @mem_regions */ + __u32 num_mem_regions; + /** @pad: MBZ */ + __u32 pad; + /** @mem_regions: The returned memory regions for this device */ + struct drm_xe_mem_region mem_regions[]; +}; + +/** + * struct drm_xe_query_config - describe the device configuration + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses + * struct drm_xe_query_config in .data. + * + * The index in @info can be: + * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits) + * and the device revision (next 8 bits) + * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device + * configuration, see list below + * + * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device + * has usable VRAM + * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment + * required by this device, typically SZ_4K or SZ_64K + * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address + * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest + * available exec queue priority + */ +struct drm_xe_query_config { + /** @num_params: number of parameters returned in info */ + __u32 num_params; + + /** @pad: MBZ */ + __u32 pad; + +#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0 +#define DRM_XE_QUERY_CONFIG_FLAGS 1 + #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0) +#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2 +#define DRM_XE_QUERY_CONFIG_VA_BITS 3 +#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4 + /** @info: array of elements containing the config info */ + __u64 info[]; +}; + +/** + * struct drm_xe_gt - describe an individual GT. + * + * To be used with drm_xe_query_gt_list, which will return a list with the + * individual descriptions of all existing GTs. + * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for + * implementing graphics and/or media operations. + * + * The @type can be: + * - %DRM_XE_QUERY_GT_TYPE_MAIN + * - %DRM_XE_QUERY_GT_TYPE_MEDIA + */ +struct drm_xe_gt { +#define DRM_XE_QUERY_GT_TYPE_MAIN 0 +#define DRM_XE_QUERY_GT_TYPE_MEDIA 1 + /** @type: GT type: Main or Media */ + __u16 type; + /** @tile_id: Tile ID where this GT lives (Information only) */ + __u16 tile_id; + /** @gt_id: Unique ID of this GT within the PCI Device */ + __u16 gt_id; + /** @pad: MBZ */ + __u16 pad[3]; + /** @reference_clock: The reference clock frequency used for timestamps */ + __u32 reference_clock; + /** + * @near_mem_regions: Bit mask of instances from + * drm_xe_query_mem_regions that are nearest to the current engines + * of this GT.
+ * Each index in this mask refers directly to the struct + * drm_xe_query_mem_regions' instance, no assumptions should + * be made about order. The type of each region is described + * by struct drm_xe_query_mem_regions' mem_class. + */ + __u64 near_mem_regions; + /** + * @far_mem_regions: Bit mask of instances from + * drm_xe_query_mem_regions that are far from the engines of this GT. + * In general, they have extra indirections when compared to the + * @near_mem_regions. For a discrete device this could mean system + * memory and memory living in a different tile. + * Each index in this mask refers directly to the struct + * drm_xe_query_mem_regions' instance, no assumptions should + * be made about order. The type of each region is described + * by struct drm_xe_query_mem_regions' mem_class. + */ + __u64 far_mem_regions; + /** @reserved: Reserved */ + __u64 reserved[8]; +}; + +/** + * struct drm_xe_query_gt_list - A list with GT description items. + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct + * drm_xe_query_gt_list in .data. + */ +struct drm_xe_query_gt_list { + /** @num_gt: number of GT items returned in gt_list */ + __u32 num_gt; + /** @pad: MBZ */ + __u32 pad; + /** @gt_list: The GT list returned for this device */ + struct drm_xe_gt gt_list[]; +}; + +/** + * struct drm_xe_query_topology_mask - describe the topology mask of a GT + * + * This is the hardware topology which reflects the internal physical + * structure of the GPU. + * + * If a query is made with a struct drm_xe_device_query where .query + * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses + * struct drm_xe_query_topology_mask in .data. + * + * The @type can be: + * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices + * (DSS) available for geometry operations. For example a query response + * containing the following in mask: + * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00`` + * means 32 DSS are available for geometry. + * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices + * (DSS) available for compute operations. For example a query response + * containing the following in mask: + * ``DSS_COMPUTE ff ff ff ff 00 00 00 00`` + * means 32 DSS are available for compute. + * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU) + * available per Dual Sub Slices (DSS). For example a query response + * containing the following in mask: + * ``EU_PER_DSS ff ff 00 00 00 00 00 00`` + * means each DSS has 16 EU. + */ +struct drm_xe_query_topology_mask { + /** @gt_id: GT ID the mask is associated with */ + __u16 gt_id; + +#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0) +#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1) +#define DRM_XE_TOPO_EU_PER_DSS (1 << 2) + /** @type: type of mask */ + __u16 type; + + /** @num_bytes: number of bytes in requested mask */ + __u32 num_bytes; + + /** @mask: little-endian mask of @num_bytes */ + __u8 mask[]; +}; + +/** + * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps + * + * If a query is made with a struct drm_xe_device_query where .query is equal to + * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles + * in .data. struct drm_xe_query_engine_cycles is allocated by the user and + * .data points to this allocated structure. + * + * The query returns the engine cycles, which along with GT's @reference_clock, + * can be used to calculate the engine timestamp. 
In addition the + * query returns a set of cpu timestamps that indicate when the command + * streamer cycle count was captured. + */ +struct drm_xe_query_engine_cycles { + /** + * @eci: This is input by the user and is the engine for which command + * streamer cycles is queried. + */ + struct drm_xe_engine_class_instance eci; + + /** + * @clockid: This is input by the user and is the reference clock id for + * CPU timestamp. For definition, see clock_gettime(2) and + * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC, + * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI. + */ + __s32 clockid; + + /** @width: Width of the engine cycle counter in bits. */ + __u32 width; + + /** + * @engine_cycles: Engine cycles as read from its register + * at 0x358 offset. + */ + __u64 engine_cycles; + + /** + * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before + * reading the engine_cycles register using the reference clockid set by the + * user. + */ + __u64 cpu_timestamp; + + /** + * @cpu_delta: Time delta in ns captured around reading the lower dword + * of the engine_cycles register. + */ + __u64 cpu_delta; +}; + +/** + * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main + * structure to query device information + * + * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_* + * and sets the value in the query member. This determines the type of + * the structure provided by the driver in data, among struct drm_xe_query_*. + * + * The @query can be: + * - %DRM_XE_DEVICE_QUERY_ENGINES + * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS + * - %DRM_XE_DEVICE_QUERY_CONFIG + * - %DRM_XE_DEVICE_QUERY_GT_LIST + * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware + * configuration of the device such as information on slices, memory, + * caches, and so on. It is provided as a table of key / value + * attributes. + * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY + * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES + * + * If size is set to 0, the driver fills it with the required size for + * the requested type of data to query. If size is equal to the required + * size, the queried information is copied into data. If size is set to + * a value different from 0 and different from the required size, the + * IOCTL call returns -EINVAL. + * + * For example the following code snippet allows retrieving and printing + * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES: + * + * .. code-block:: C + * + * struct drm_xe_query_engines *engines; + * struct drm_xe_device_query query = { + * .extensions = 0, + * .query = DRM_XE_DEVICE_QUERY_ENGINES, + * .size = 0, + * .data = 0, + * }; + * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); + * engines = malloc(query.size); + * query.data = (uintptr_t)engines; + * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); + * for (int i = 0; i < engines->num_engines; i++) { + * printf("Engine %d: %s\n", i, + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER": + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_COPY ? "COPY": + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE": + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE": + * engines->engines[i].instance.engine_class == + * DRM_XE_ENGINE_CLASS_COMPUTE ? 
"COMPUTE": + * "UNKNOWN"); + * } + * free(engines); + */ +struct drm_xe_device_query { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + +#define DRM_XE_DEVICE_QUERY_ENGINES 0 +#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1 +#define DRM_XE_DEVICE_QUERY_CONFIG 2 +#define DRM_XE_DEVICE_QUERY_GT_LIST 3 +#define DRM_XE_DEVICE_QUERY_HWCONFIG 4 +#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5 +#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6 + /** @query: The type of data to query */ + __u32 query; + + /** @size: Size of the queried data */ + __u32 size; + + /** @data: Queried data is placed here */ + __u64 data; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for + * gem creation + * + * The @flags can be: + * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING + * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT + * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a + * possible placement, ensure that the corresponding VRAM allocation + * will always use the CPU accessible part of VRAM. This is important + * for small-bar systems (on full-bar systems this gets turned into a + * noop). + * Note1: System memory can be used as an extra placement if the kernel + * should spill the allocation to system memory, if space can't be made + * available in the CPU accessible part of VRAM (giving the same + * behaviour as the i915 interface, see + * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS). + * Note2: For clear-color CCS surfaces the kernel needs to read the + * clear-color value stored in the buffer, and on discrete platforms we + * need to use VRAM for display surfaces, therefore the kernel requires + * setting this flag for such objects, otherwise an error is thrown on + * small-bar systems. + * + * @cpu_caching supports the following values: + * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back + * caching. On iGPU this can't be used for scanout surfaces. Currently + * not allowed for objects placed in VRAM. + * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This + * is uncached. Scanout surfaces should likely use this. All objects + * that can be placed in VRAM must use this. + */ +struct drm_xe_gem_create { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** + * @size: Size of the object to be created, must match region + * (system or vram) minimum alignment (&min_page_size). + */ + __u64 size; + + /** + * @placement: A mask of memory instances of where BO can be placed. + * Each index in this mask refers directly to the struct + * drm_xe_query_mem_regions' instance, no assumptions should + * be made about order. The type of each region is described + * by struct drm_xe_query_mem_regions' mem_class. + */ + __u32 placement; + +#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0) +#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1) +#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2) + /** + * @flags: Flags, currently a mask of memory instances of where BO can + * be placed + */ + __u32 flags; + + /** + * @vm_id: Attached VM, if any + * + * If a VM is specified, this BO must: + * + * 1. Only ever be bound to that VM. + * 2. Cannot be exported as a PRIME fd. + */ + __u32 vm_id; + + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero. 
+ */ + __u32 handle; + +#define DRM_XE_GEM_CPU_CACHING_WB 1 +#define DRM_XE_GEM_CPU_CACHING_WC 2 + /** + * @cpu_caching: The CPU caching mode to select for this object. If + * mmapping the object the mode selected here will also be used. + */ + __u16 cpu_caching; + /** @pad: MBZ */ + __u16 pad[3]; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET + */ +struct drm_xe_gem_mmap_offset { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @handle: Handle for the object being mapped. */ + __u32 handle; + + /** @flags: Must be zero */ + __u32 flags; + + /** @offset: The fake offset to use for subsequent mmap call */ + __u64 offset; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE + * + * The @flags can be: + * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE + * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts + * exec submissions to its exec_queues that don't have an upper time + * limit on the job execution time. But exec submissions to these + * don't allow syncs of type DRM_XE_SYNC_TYPE_SYNCOBJ or + * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ to be used as out-syncobjs, that + * is, together with DRM_XE_SYNC_FLAG_SIGNAL. + * LR VMs can be created in recoverable page-fault mode using + * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it. + * If that flag is omitted, the UMD cannot rely on the slightly + * different per-VM overcommit semantics that are enabled by + * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may + * still enable recoverable pagefaults if supported by the device. + * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also + * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on + * demand when accessed, and also allows per-VM overcommit of memory. + * The xe driver internally uses recoverable pagefaults to implement + * this. + */ +struct drm_xe_vm_create { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + +#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0) +#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1) +#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2) + /** @flags: Flags */ + __u32 flags; + + /** @vm_id: Returned VM ID */ + __u32 vm_id; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY + */ +struct drm_xe_vm_destroy { + /** @vm_id: VM ID */ + __u32 vm_id; + + /** @pad: MBZ */ + __u32 pad; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_vm_bind_op - run bind operations + * + * The @op can be: + * - %DRM_XE_VM_BIND_OP_MAP + * - %DRM_XE_VM_BIND_OP_UNMAP + * - %DRM_XE_VM_BIND_OP_MAP_USERPTR + * - %DRM_XE_VM_BIND_OP_UNMAP_ALL + * - %DRM_XE_VM_BIND_OP_PREFETCH + * + * and the @flags can be: + * - %DRM_XE_VM_BIND_FLAG_READONLY + * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - Valid on a faulting VM only, do the + * MAP operation immediately rather than deferring the MAP to the page + * fault handler. + * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page + * tables are set up with a special bit which indicates writes are + * dropped and all reads return zero. In the future, the NULL flag + * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO + * handle MBZ, and the BO offset MBZ. This flag is intended to + * implement VK sparse bindings.
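+ *
+ * For instance, a sketch of a NULL binding of one 64 KiB range (field
+ * layout as defined below; not a complete submission):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_xe_vm_bind_op op = {
+ *         .op = DRM_XE_VM_BIND_OP_MAP,
+ *         .flags = DRM_XE_VM_BIND_FLAG_NULL,
+ *         .addr = 0x1a0000,
+ *         .range = 0x10000,
+ *         // .obj and .obj_offset stay zero for a NULL binding
+ *     };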
+ */ +struct drm_xe_vm_bind_op { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** + * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP + */ + __u32 obj; + + /** + * @pat_index: The platform defined @pat_index to use for this mapping. + * The index basically maps to some predefined memory attributes, + * including things like caching, coherency, compression etc. The exact + * meaning of the pat_index is platform specific and defined in the + * Bspec and PRMs. When the KMD sets up the binding the index here is + * encoded into the ppGTT PTE. + * + * For coherency the @pat_index needs to be at least 1-way coherent when + * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD + * will extract the coherency mode from the @pat_index and reject if + * there is a mismatch (see note below for pre-MTL platforms). + * + * Note: On pre-MTL platforms there is only a caching mode and no + * explicit coherency mode, but on such hardware there is always a + * shared-LLC (or the device is a dgpu) so all GT memory accesses are + * coherent with CPU caches even with the caching mode set as uncached. + * It's only the display engine that is incoherent (on dgpu it must be + * in VRAM which is always mapped as WC on the CPU). However, to keep + * the uapi somewhat consistent with newer platforms the KMD groups the + * different cache levels into the following coherency buckets on all + * pre-MTL platforms: + * + * ppGTT UC -> COH_NONE + * ppGTT WC -> COH_NONE + * ppGTT WT -> COH_NONE + * ppGTT WB -> COH_AT_LEAST_1WAY + * + * In practice UC/WC/WT should only ever be used for scanout surfaces on + * such platforms (or perhaps in general for dma-buf if shared with + * another device) since it is only the display engine that is actually + * incoherent. Everything else should typically use WB given that we + * have a shared-LLC. On MTL+ this completely changes and the HW + * defines the coherency mode as part of the @pat_index, where + * incoherent GT access is possible. + * + * Note: For userptr and externally imported dma-buf the kernel expects + * either 1WAY or 2WAY for the @pat_index. + * + * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions + * on the @pat_index. For such mappings there is no actual memory being + * mapped (the address in the PTE is invalid), so the various PAT memory + * attributes likely do not apply. Simply leaving it as zero is one + * option (still a valid pat_index). + */ + __u16 pat_index; + + /** @pad: MBZ */ + __u16 pad; + + union { + /** + * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE, + * ignored for unbind + */ + __u64 obj_offset; + + /** @userptr: user pointer to bind on */ + __u64 userptr; + }; + + /** + * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL + */ + __u64 range; + + /** @addr: Address to operate on, MBZ for UNMAP_ALL */ + __u64 addr; + +#define DRM_XE_VM_BIND_OP_MAP 0x0 +#define DRM_XE_VM_BIND_OP_UNMAP 0x1 +#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2 +#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3 +#define DRM_XE_VM_BIND_OP_PREFETCH 0x4 + /** @op: Bind operation to perform */ + __u32 op; + +#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0) +#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1) +#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2) + /** @flags: Bind flags */ + __u32 flags; + + /** + * @prefetch_mem_region_instance: Memory region to prefetch VMA to. + * It is a region instance, not a mask.
+ * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation. + */ + __u32 prefetch_mem_region_instance; + + /** @pad2: MBZ */ + __u32 pad2; + + /** @reserved: Reserved */ + __u64 reserved[3]; +}; + +/** + * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND + * + * Below is an example of a minimal use of @drm_xe_vm_bind to + * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to + * illustrate `userptr`. It can be synchronized by using the example + * provided for @drm_xe_sync. + * + * .. code-block:: C + * + * data = aligned_alloc(ALIGNMENT, BO_SIZE); + * struct drm_xe_vm_bind bind = { + * .vm_id = vm, + * .num_binds = 1, + * .bind.obj = 0, + * .bind.obj_offset = to_user_pointer(data), + * .bind.range = BO_SIZE, + * .bind.addr = BIND_ADDRESS, + * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR, + * .bind.flags = 0, + * .num_syncs = 1, + * .syncs = &sync, + * .exec_queue_id = 0, + * }; + * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind); + * + */ +struct drm_xe_vm_bind { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @vm_id: The ID of the VM to bind to */ + __u32 vm_id; + + /** + * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND + * and exec queue must have same vm_id. If zero, the default VM bind engine + * is used. + */ + __u32 exec_queue_id; + + /** @pad: MBZ */ + __u32 pad; + + /** @num_binds: number of binds in this IOCTL */ + __u32 num_binds; + + union { + /** @bind: used if num_binds == 1 */ + struct drm_xe_vm_bind_op bind; + + /** + * @vector_of_binds: userptr to array of struct + * drm_xe_vm_bind_op if num_binds > 1 + */ + __u64 vector_of_binds; + }; + + /** @pad2: MBZ */ + __u32 pad2; + + /** @num_syncs: amount of syncs to wait on */ + __u32 num_syncs; + + /** @syncs: pointer to struct drm_xe_sync array */ + __u64 syncs; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE + * + * The example below shows how to use @drm_xe_exec_queue_create to create + * a simple exec_queue (no parallel submission) of class + * &DRM_XE_ENGINE_CLASS_RENDER. + * + * .. 
code-block:: C + * + * struct drm_xe_engine_class_instance instance = { + * .engine_class = DRM_XE_ENGINE_CLASS_RENDER, + * }; + * struct drm_xe_exec_queue_create exec_queue_create = { + * .extensions = 0, + * .vm_id = vm, + * .width = 1, + * .num_placements = 1, + * .instances = to_user_pointer(&instance), + * }; + * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create); + * + */ +struct drm_xe_exec_queue_create { +#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7 +/* Monitor 128KB contiguous region with 4K sub-granularity */ +#define DRM_XE_ACC_GRANULARITY_128K 0 +/* Monitor 2MB contiguous region with 64KB sub-granularity */ +#define DRM_XE_ACC_GRANULARITY_2M 1 +/* Monitor 16MB contiguous region with 512KB sub-granularity */ +#define DRM_XE_ACC_GRANULARITY_16M 2 +/* Monitor 64MB contiguous region with 2M sub-granularity */ +#define DRM_XE_ACC_GRANULARITY_64M 3 + + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @width: submission width (number of BBs per exec) for this exec queue */ + __u16 width; + + /** @num_placements: number of valid placements for this exec queue */ + __u16 num_placements; + + /** @vm_id: VM to use for this exec queue */ + __u32 vm_id; + + /** @flags: MBZ */ + __u32 flags; + + /** @exec_queue_id: Returned exec queue ID */ + __u32 exec_queue_id; + + /** + * @instances: user pointer to a 2-d array of struct + * drm_xe_engine_class_instance + * + * length = width (i) * num_placements (j) + * index = j + i * width + */ + __u64 instances; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY + */ +struct drm_xe_exec_queue_destroy { + /** @exec_queue_id: Exec queue ID */ + __u32 exec_queue_id; + + /** @pad: MBZ */ + __u32 pad; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY + * + * The @property can be: + * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN + */ +struct drm_xe_exec_queue_get_property { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @exec_queue_id: Exec queue ID */ + __u32 exec_queue_id; + +#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 + /** @property: property to get */ + __u32 property; + + /** @value: property value */ + __u64 value; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_sync - sync object + * + * The @type can be: + * - %DRM_XE_SYNC_TYPE_SYNCOBJ + * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ + * - %DRM_XE_SYNC_TYPE_USER_FENCE + * + * and the @flags can be: + * - %DRM_XE_SYNC_FLAG_SIGNAL + * + * A minimal use of @drm_xe_sync looks like this: + * + * .. code-block:: C + * + * struct drm_xe_sync sync = { + * .flags = DRM_XE_SYNC_FLAG_SIGNAL, + * .type = DRM_XE_SYNC_TYPE_SYNCOBJ, + * }; + * struct drm_syncobj_create syncobj_create = { 0 }; + * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create); + * sync.handle = syncobj_create.handle; + * ... + * use of &sync in drm_xe_exec or drm_xe_vm_bind + * ...
+ * struct drm_syncobj_wait wait = { + * .handles = &sync.handle, + * .timeout_nsec = INT64_MAX, + * .count_handles = 1, + * .flags = 0, + * .first_signaled = 0, + * .pad = 0, + * }; + * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait); + */ +struct drm_xe_sync { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + +#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0 +#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1 +#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2 + /** @type: Type of this sync object */ + __u32 type; + +#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0) + /** @flags: Sync Flags */ + __u32 flags; + + union { + /** @handle: Handle for the object */ + __u32 handle; + + /** + * @addr: Address of user fence. When the sync is passed in via the exec + * IOCTL this is a GPU address in the VM. When the sync is passed in via + * the VM bind IOCTL this is a user pointer. In either case, it is + * the user's responsibility that this address is present and + * mapped when the user fence is signalled. Must be qword + * aligned. + */ + __u64 addr; + }; + + /** + * @timeline_value: Input for the timeline sync object. Needs to be + * different from 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ. + */ + __u64 timeline_value; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC + * + * This is an example of using @drm_xe_exec to execute the object + * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue + * (see example in @drm_xe_exec_queue_create). It can be synchronized + * by using the example provided for @drm_xe_sync. + * + * .. code-block:: C + * + * struct drm_xe_exec exec = { + * .exec_queue_id = exec_queue, + * .syncs = (uintptr_t)&sync, + * .num_syncs = 1, + * .address = BIND_ADDRESS, + * .num_batch_buffer = 1, + * }; + * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec); + * + */ +struct drm_xe_exec { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @exec_queue_id: Exec queue ID for the batch buffer */ + __u32 exec_queue_id; + + /** @num_syncs: Number of struct drm_xe_sync entries in the array. */ + __u32 num_syncs; + + /** @syncs: Pointer to struct drm_xe_sync array. */ + __u64 syncs; + + /** + * @address: address of batch buffer if num_batch_buffer == 1 or an + * array of batch buffer addresses + */ + __u64 address; + + /** + * @num_batch_buffer: number of batch buffers in this exec, must match + * the width of the engine + */ + __u16 num_batch_buffer; + + /** @pad: MBZ */ + __u16 pad[3]; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +/** + * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE + * + * Wait on a user fence. XE will wake up on every HW engine interrupt in the + * instances list and check if the user fence is complete:: + * + * (*addr & MASK) OP (VALUE & MASK) + * + * Returns to the user on user fence completion or timeout.
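+ *
+ * For example, a minimal sketch that waits for the qword at an
+ * illustrative fence_addr to become non-zero, reusing fd and exec_queue
+ * from the earlier examples and the op, flag and mask values listed next:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_xe_wait_user_fence wait = {
+ *         .addr = (uintptr_t)fence_addr, // must be qword aligned
+ *         .op = DRM_XE_UFENCE_WAIT_OP_NEQ,
+ *         .value = 0,
+ *         .mask = 0xffffffffffffffffu,
+ *         .timeout = -1, // never-ending wait
+ *         .exec_queue_id = exec_queue,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);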
+ * + * The @op can be: + * - %DRM_XE_UFENCE_WAIT_OP_EQ + * - %DRM_XE_UFENCE_WAIT_OP_NEQ + * - %DRM_XE_UFENCE_WAIT_OP_GT + * - %DRM_XE_UFENCE_WAIT_OP_GTE + * - %DRM_XE_UFENCE_WAIT_OP_LT + * - %DRM_XE_UFENCE_WAIT_OP_LTE + * + * and the @flags can be: + * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME + * + * The @mask values can be for example: + * - 0xffu for u8 + * - 0xffffu for u16 + * - 0xffffffffu for u32 + * - 0xffffffffffffffffu for u64 + */ +struct drm_xe_wait_user_fence { + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** + * @addr: user pointer address to wait on, must be qword aligned + */ + __u64 addr; + +#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0 +#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1 +#define DRM_XE_UFENCE_WAIT_OP_GT 0x2 +#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3 +#define DRM_XE_UFENCE_WAIT_OP_LT 0x4 +#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5 + /** @op: wait operation (type of comparison) */ + __u16 op; + +#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0) + /** @flags: wait flags */ + __u16 flags; + + /** @pad: MBZ */ + __u32 pad; + + /** @value: compare value */ + __u64 value; + + /** @mask: comparison mask */ + __u64 mask; + + /** + * @timeout: how long to wait before bailing, value in nanoseconds. + * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout), + * it contains the timeout in nanoseconds to wait (the wait will + * expire at now() + timeout). + * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout), + * the wait will end at @timeout (using the system MONOTONIC_CLOCK). + * Passing a negative timeout leads to a never-ending wait. + * + * On relative timeout this value is updated with the timeout left + * (for restarting the call in case of signal delivery). + * On absolute timeout this value stays intact (a restarted call still + * expires at the same point of time).
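+ *
+ * Building on the sketch above, a sketch deriving an absolute timeout
+ * 10 ms from now, matching the ABSTIME semantics (CLOCK_MONOTONIC, as
+ * in clock_gettime(2)):
+ *
+ * .. code-block:: C
+ *
+ *     struct timespec ts;
+ *     clock_gettime(CLOCK_MONOTONIC, &ts);
+ *     wait.flags = DRM_XE_UFENCE_WAIT_FLAG_ABSTIME;
+ *     wait.timeout = ts.tv_sec * 1000000000LL + ts.tv_nsec + 10000000;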
+ */ + __s64 timeout; + + /** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */ + __u32 exec_queue_id; + + /** @pad2: MBZ */ + __u32 pad2; + + /** @reserved: Reserved */ + __u64 reserved[2]; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* _UAPI_XE_DRM_H_ */ diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 5f636b5afcd7..d44a8118b2ed 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -251,20 +251,22 @@ struct binder_extended_error { __s32 param; }; -#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) -#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) -#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) -#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) -#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) -#define BINDER_THREAD_EXIT _IOW('b', 8, __s32) -#define BINDER_VERSION _IOWR('b', 9, struct binder_version) -#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) -#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) -#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object) -#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info) -#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info) -#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32) -#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error) +enum { + BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read), + BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64), + BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32), + BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32), + BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32), + BINDER_THREAD_EXIT = _IOW('b', 8, __s32), + BINDER_VERSION = _IOWR('b', 9, struct binder_version), + BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info), + BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref), + BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object), + BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info), + BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info), + BINDER_ENABLE_ONEWAY_SPAM_DETECTION = _IOW('b', 16, __u32), + BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error), +}; /* * NOTE: Two special error codes you should check for when calling diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h index 9204e4494b25..6e25753015df 100644 --- a/include/uapi/linux/batadv_packet.h +++ b/include/uapi/linux/batadv_packet.h @@ -116,6 +116,9 @@ enum batadv_icmp_packettype { * only need routable IPv4 multicast packets we signed up for explicitly * @BATADV_MCAST_WANT_NO_RTR6: we have no IPv6 multicast router and therefore * only need routable IPv6 multicast packets we signed up for explicitly + * @BATADV_MCAST_HAVE_MC_PTYPE_CAPA: we can parse, receive and forward + * batman-adv multicast packets with a multicast tracker TVLV. And all our + * hard interfaces have an MTU of at least 1280 bytes. 
*/ enum batadv_mcast_flags { BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0, @@ -123,6 +126,7 @@ enum batadv_mcast_flags { BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2, BATADV_MCAST_WANT_NO_RTR4 = 1UL << 3, BATADV_MCAST_WANT_NO_RTR6 = 1UL << 4, + BATADV_MCAST_HAVE_MC_PTYPE_CAPA = 1UL << 5, }; /* tt data subtypes */ @@ -174,14 +178,16 @@ enum batadv_bla_claimframe { * @BATADV_TVLV_TT: translation table tvlv * @BATADV_TVLV_ROAM: roaming advertisement tvlv * @BATADV_TVLV_MCAST: multicast capability tvlv + * @BATADV_TVLV_MCAST_TRACKER: multicast tracker tvlv */ enum batadv_tvlv_type { - BATADV_TVLV_GW = 0x01, - BATADV_TVLV_DAT = 0x02, - BATADV_TVLV_NC = 0x03, - BATADV_TVLV_TT = 0x04, - BATADV_TVLV_ROAM = 0x05, - BATADV_TVLV_MCAST = 0x06, + BATADV_TVLV_GW = 0x01, + BATADV_TVLV_DAT = 0x02, + BATADV_TVLV_NC = 0x03, + BATADV_TVLV_TT = 0x04, + BATADV_TVLV_ROAM = 0x05, + BATADV_TVLV_MCAST = 0x06, + BATADV_TVLV_MCAST_TRACKER = 0x07, }; #pragma pack(2) @@ -488,6 +494,25 @@ struct batadv_bcast_packet { }; /** + * struct batadv_mcast_packet - multicast packet for network payload + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header + * @reserved: reserved byte for alignment + * @tvlv_len: length of the appended tvlv buffer (in bytes) + */ +struct batadv_mcast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 reserved; + __be16 tvlv_len; + /* "4 bytes boundary + 2 bytes" long to make the payload after the + * following ethernet header again 4 bytes boundary aligned + */ +}; + +/** * struct batadv_coded_packet - network coded packet * @packet_type: batman-adv packet type, part of the general header * @version: batman-adv protocol version, part of the general header @@ -628,6 +653,14 @@ struct batadv_tvlv_mcast_data { __u8 reserved[3]; }; +/** + * struct batadv_tvlv_mcast_tracker - payload of a multicast tracker tvlv + * @num_dests: number of subsequent destination originator MAC addresses + */ +struct batadv_tvlv_mcast_tracker { + __be16 num_dests; +}; + #pragma pack() #endif /* _UAPI_LINUX_BATADV_PACKET_H_ */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 8790b3962e4b..754e68ca8744 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -932,7 +932,14 @@ enum bpf_map_type { */ BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED, + /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs + * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE + + * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE + * functionality and more. So mark BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE + * deprecated.
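+ *
+ * A sketch of the replacement pattern in BPF program C (libbpf map
+ * definition macros; the value struct is illustrative):
+ *
+ *	struct {
+ *		__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ *		__uint(map_flags, BPF_F_NO_PREALLOC);
+ *		__type(key, int);
+ *		__type(value, struct pkt_stats);
+ *	} cgrp_stats SEC(".maps");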
+ */ + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_SK_STORAGE, @@ -1040,6 +1047,13 @@ enum bpf_attach_type { BPF_TCX_INGRESS, BPF_TCX_EGRESS, BPF_TRACE_UPROBE_MULTI, + BPF_CGROUP_UNIX_CONNECT, + BPF_CGROUP_UNIX_SENDMSG, + BPF_CGROUP_UNIX_RECVMSG, + BPF_CGROUP_UNIX_GETPEERNAME, + BPF_CGROUP_UNIX_GETSOCKNAME, + BPF_NETKIT_PRIMARY, + BPF_NETKIT_PEER, __MAX_BPF_ATTACH_TYPE }; @@ -1059,9 +1073,12 @@ enum bpf_link_type { BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, - MAX_BPF_LINK_TYPE, + BPF_LINK_TYPE_NETKIT = 13, + __MAX_BPF_LINK_TYPE, }; +#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE + enum bpf_perf_event_type { BPF_PERF_EVENT_UNSPEC = 0, BPF_PERF_EVENT_UPROBE = 1, @@ -1185,6 +1202,9 @@ enum bpf_perf_event_type { */ #define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6) +/* The verifier internal test flag. Behavior is undefined */ +#define BPF_F_TEST_REG_INVARIANTS (1U << 7) + /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. */ @@ -1644,6 +1664,13 @@ union bpf_attr { __u32 flags; __u32 pid; } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } netkit; }; } link_create; @@ -1962,7 +1989,9 @@ union bpf_attr { * performed again, if the helper is used in combination with * direct packet access. * Return - * 0 on success, or a negative error in case of failure. + * 0 on success, or a negative error in case of failure. Positive + * error indicates a potential drop or congestion in the target + * device. The particular positive error codes are not defined. * * u64 bpf_get_current_pid_tgid(void) * Description @@ -2695,8 +2724,8 @@ union bpf_attr { * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. - * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** - * and **BPF_CGROUP_INET6_CONNECT**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, + * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: @@ -2934,8 +2963,8 @@ union bpf_attr { * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. - * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** - * and **BPF_CGROUP_INET6_CONNECT**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, + * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. * * This helper actually implements a subset of **getsockopt()**. * It supports the same set of *optname*\ s that is supported by @@ -3255,6 +3284,11 @@ union bpf_attr { * and *params*->smac will not be set as output. A common * use case is to call **bpf_redirect_neigh**\ () after * doing **bpf_fib_lookup**\ (). + * **BPF_FIB_LOOKUP_SRC** + * Derive and set source IP addr in *params*->ipv{4,6}_src + * for the nexthop. If the src addr cannot be derived, + * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this + * case, *params*->dmac and *params*->smac are not set either. * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. @@ -4488,6 +4522,8 @@ union bpf_attr { * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. 
+ * Note: the user stack will only be populated if the *task* is + * the current task; all other tasks will return -EOPNOTSUPP. * To achieve this, the helper needs *task*, which is a valid * pointer to **struct task_struct**. To store the stacktrace, the * bpf program provides *buf* with a nonnegative *size*. @@ -4499,6 +4535,7 @@ union bpf_attr { * * **BPF_F_USER_STACK** * Collect a user space stack instead of a kernel stack. + * The *task* must be the current task. * **BPF_F_USER_BUILD_ID** * Collect buildid+offset instead of ips for user stack, * only valid if **BPF_F_USER_STACK** is also specified. @@ -5087,6 +5124,8 @@ union bpf_attr { * **BPF_F_TIMER_ABS** * Start the timer in absolute expire value instead of the * default relative one. + * **BPF_F_TIMER_CPU_PIN** + * Timer will be pinned to the CPU of the caller. * * Return * 0 on success. @@ -6523,8 +6562,19 @@ struct bpf_link_info { __aligned_u64 addrs; __u32 count; /* in/out: kprobe_multi function count */ __u32 flags; + __u64 missed; } kprobe_multi; struct { + __aligned_u64 path; + __aligned_u64 offsets; + __aligned_u64 ref_ctr_offsets; + __aligned_u64 cookies; + __u32 path_size; /* in/out: real path size on success, including zero byte */ + __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */ + __u32 flags; + __u32 pid; + } uprobe_multi; + struct { __u32 type; /* enum bpf_perf_event_type */ __u32 :32; union { @@ -6538,6 +6588,7 @@ struct bpf_link_info { __u32 name_len; __u32 offset; /* offset from func_name */ __u64 addr; + __u64 missed; } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */ struct { __aligned_u64 tp_name; /* in/out */ @@ -6553,6 +6604,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; }; } __attribute__((aligned(8))); @@ -6849,6 +6904,7 @@ enum { BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ BPF_TCP_NEW_SYN_RECV, + BPF_TCP_BOUND_INACTIVE, BPF_TCP_MAX_STATES /* Leave at the end! 
*/ }; @@ -6951,6 +7007,7 @@ enum { BPF_FIB_LOOKUP_OUTPUT = (1U << 1), BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), BPF_FIB_LOOKUP_TBID = (1U << 3), + BPF_FIB_LOOKUP_SRC = (1U << 4), }; enum { @@ -6963,6 +7020,7 @@ enum { BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ + BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */ }; struct bpf_fib_lookup { @@ -6997,6 +7055,9 @@ struct bpf_fib_lookup { __u32 rt_metric; }; + /* input: source address to consider for lookup + * output: source address result from lookup + */ union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ @@ -7109,40 +7170,31 @@ struct bpf_spin_lock { }; struct bpf_timer { - __u64 :64; - __u64 :64; + __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_dynptr { - __u64 :64; - __u64 :64; + __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_list_head { - __u64 :64; - __u64 :64; + __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_list_node { - __u64 :64; - __u64 :64; - __u64 :64; + __u64 __opaque[3]; } __attribute__((aligned(8))); struct bpf_rb_root { - __u64 :64; - __u64 :64; + __u64 __opaque[2]; } __attribute__((aligned(8))); struct bpf_rb_node { - __u64 :64; - __u64 :64; - __u64 :64; - __u64 :64; + __u64 __opaque[4]; } __attribute__((aligned(8))); struct bpf_refcount { - __u32 :32; + __u32 __opaque[1]; } __attribute__((aligned(4))); struct bpf_sysctl { @@ -7298,9 +7350,11 @@ struct bpf_core_relo { * Flags to control bpf_timer_start() behaviour. * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is * relative to current time. + * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller. 
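+ *
+ * For example, a sketch from BPF program C arming a (previously
+ * initialized) timer on the calling CPU with a 1 ms relative timeout,
+ * where elem->timer is illustrative:
+ *
+ *	bpf_timer_start(&elem->timer, 1000000, BPF_F_TIMER_CPU_PIN);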
*/ enum { BPF_F_TIMER_ABS = (1ULL << 0), + BPF_F_TIMER_CPU_PIN = (1ULL << 1), }; /* BPF numbers iterator state */ diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h deleted file mode 100644 index cbc1f5813f50..000000000000 --- a/include/uapi/linux/bpfilter.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _UAPI_LINUX_BPFILTER_H -#define _UAPI_LINUX_BPFILTER_H - -#include <linux/if.h> - -enum { - BPFILTER_IPT_SO_SET_REPLACE = 64, - BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65, - BPFILTER_IPT_SET_MAX, -}; - -enum { - BPFILTER_IPT_SO_GET_INFO = 64, - BPFILTER_IPT_SO_GET_ENTRIES = 65, - BPFILTER_IPT_SO_GET_REVISION_MATCH = 66, - BPFILTER_IPT_SO_GET_REVISION_TARGET = 67, - BPFILTER_IPT_GET_MAX, -}; - -#endif /* _UAPI_LINUX_BPFILTER_H */ diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index dbb8b96da50d..f8bc34a6bcfa 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -333,6 +333,8 @@ struct btrfs_ioctl_fs_info_args { #define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11) #define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12) #define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13) +#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14) +#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16) struct btrfs_ioctl_feature_flags { __u64 compat_flags; @@ -612,6 +614,9 @@ struct btrfs_ioctl_clone_range_args { */ #define BTRFS_DEFRAG_RANGE_COMPRESS 1 #define BTRFS_DEFRAG_RANGE_START_IO 2 +#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \ + BTRFS_DEFRAG_RANGE_START_IO) + struct btrfs_ioctl_defrag_range_args { /* start of the defrag operation */ __u64 start; @@ -753,6 +758,7 @@ struct btrfs_ioctl_get_dev_stats { #define BTRFS_QUOTA_CTL_ENABLE 1 #define BTRFS_QUOTA_CTL_DISABLE 2 #define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3 +#define BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA 4 struct btrfs_ioctl_quota_ctl_args { __u64 cmd; __u64 status; diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index fc3c32186d7e..d24e8e121507 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -73,6 +73,9 @@ /* Holds the block group items for extent tree v2. */ #define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL +/* Tracks RAID stripes in block groups. */ +#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL + /* device stats in the device tree */ #define BTRFS_DEV_STATS_OBJECTID 0ULL @@ -216,6 +219,22 @@ */ #define BTRFS_METADATA_ITEM_KEY 169 +/* + * Special inline ref key which stores the id of the subvolume which originally + * created the extent. This subvolume owns the extent permanently from the + * perspective of simple quotas. Needed to know which subvolume to free quota + * usage from when the extent is deleted. + * + * Stored as an inline ref, rather than as a separate item, to avoid wasting + * space on top of the existing extent item. However, unlike the other inline + * refs, there is only one owner ref per extent rather than one per referencing + * root. + * + * Because of this, it goes at the front of the list of inline refs, and thus + * must have a lower type value than any other inline ref type (to satisfy the + * disk format rule that inline refs have non-decreasing type).
+ */ +#define BTRFS_EXTENT_OWNER_REF_KEY 172 + #define BTRFS_TREE_BLOCK_REF_KEY 176 #define BTRFS_EXTENT_DATA_REF_KEY 178 @@ -261,6 +280,8 @@ #define BTRFS_DEV_ITEM_KEY 216 #define BTRFS_CHUNK_ITEM_KEY 228 +#define BTRFS_RAID_STRIPE_KEY 230 + /* * Records the overall state of the qgroups. * There's only one instance of this key present, @@ -719,6 +740,30 @@ struct btrfs_free_space_header { __le64 num_bitmaps; } __attribute__ ((__packed__)); +struct btrfs_raid_stride { + /* The id of device this raid extent lives on. */ + __le64 devid; + /* The physical location on disk. */ + __le64 physical; +} __attribute__ ((__packed__)); + +/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */ +#define BTRFS_STRIPE_RAID0 1 +#define BTRFS_STRIPE_RAID1 2 +#define BTRFS_STRIPE_DUP 3 +#define BTRFS_STRIPE_RAID10 4 +#define BTRFS_STRIPE_RAID5 5 +#define BTRFS_STRIPE_RAID6 6 +#define BTRFS_STRIPE_RAID1C3 7 +#define BTRFS_STRIPE_RAID1C4 8 + +struct btrfs_stripe_extent { + __u8 encoding; + __u8 reserved[7]; + /* An array of raid strides this stripe is composed of. */ + struct btrfs_raid_stride strides[]; +} __attribute__ ((__packed__)); + #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) @@ -787,6 +832,10 @@ struct btrfs_shared_data_ref { __le32 count; } __attribute__ ((__packed__)); +struct btrfs_extent_owner_ref { + __le64 root_id; +} __attribute__ ((__packed__)); + struct btrfs_extent_inline_ref { __u8 type; __le64 offset; @@ -1204,9 +1253,17 @@ static inline __u16 btrfs_qgroup_level(__u64 qgroupid) */ #define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2) +/* + * Whether or not this filesystem is using simple quotas. Not exactly the + * incompat bit, because we support using simple quotas, disabling it, then + * going back to full qgroup quotas. + */ +#define BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE (1ULL << 3) + #define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \ BTRFS_QGROUP_STATUS_FLAG_RESCAN | \ - BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) + BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | \ + BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) #define BTRFS_QGROUP_STATUS_VERSION 1 @@ -1228,6 +1285,15 @@ struct btrfs_qgroup_status_item { * of the scan. It contains a logical address */ __le64 rescan; + + /* + * The generation when quotas were last enabled. Used by simple quotas to + * avoid decrementing when freeing an extent that was written before + * enable. + * + * Set only if flags contain BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE. + */ + __le64 enable_gen; } __attribute__ ((__packed__)); struct btrfs_qgroup_info_item { diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h index fc248ef00e86..008a691c254b 100644 --- a/include/uapi/linux/counter.h +++ b/include/uapi/linux/counter.h @@ -38,7 +38,7 @@ enum counter_scope { * * For example, if the Count 2 ceiling extension of Counter device 4 is desired, * set type equal to COUNTER_COMPONENT_EXTENSION, scope equal to - * COUNTER_COUNT_SCOPE, parent equal to 2, and id equal to the value provided by + * COUNTER_SCOPE_COUNT, parent equal to 2, and id equal to the value provided by * the respective /sys/bus/counter/devices/counter4/count2/ceiling_component_id * sysfs attribute. 
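+ * For example, a sketch of the resulting component description, where
+ * ceiling_component_id holds the value read from that sysfs attribute:
+ *
+ *	struct counter_component component = {
+ *		.type = COUNTER_COMPONENT_EXTENSION,
+ *		.scope = COUNTER_SCOPE_COUNT,
+ *		.parent = 2,
+ *		.id = ceiling_component_id,
+ *	};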
*/ diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 14bc6e742148..42066f4eb890 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -46,6 +46,7 @@ ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \ ___DEPRECATED(SCAN_MEDIA, "Scan Media"), \ ___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \ + ___C(GET_TIMESTAMP, "Get Timestamp"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 03875e078be8..130cae0d3e20 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -139,6 +139,8 @@ enum devlink_command { DEVLINK_CMD_SELFTESTS_GET, /* can dump */ DEVLINK_CMD_SELFTESTS_RUN, + DEVLINK_CMD_NOTIFY_FILTER_SET, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -265,7 +267,7 @@ enum { * Documentation/networking/devlink/devlink-flash.rst * */ -enum { +enum devlink_flash_overwrite { DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT, DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT, @@ -680,6 +682,7 @@ enum devlink_port_function_attr { DEVLINK_PORT_FN_ATTR_STATE, /* u8 */ DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */ DEVLINK_PORT_FN_ATTR_CAPS, /* bitfield32 */ + DEVLINK_PORT_FN_ATTR_DEVLINK, /* nested */ __DEVLINK_PORT_FUNCTION_ATTR_MAX, DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1 diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h new file mode 100644 index 000000000000..b4e947f9bfbc --- /dev/null +++ b/include/uapi/linux/dpll.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/dpll.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_DPLL_H +#define _UAPI_LINUX_DPLL_H + +#define DPLL_FAMILY_NAME "dpll" +#define DPLL_FAMILY_VERSION 1 + +/** + * enum dpll_mode - working modes a dpll can support, differentiates if and how + * dpll selects one of its inputs to syntonize with it, valid values for + * DPLL_A_MODE attribute + * @DPLL_MODE_MANUAL: input can be only selected by sending a request to dpll + * @DPLL_MODE_AUTOMATIC: highest prio input pin auto selected by dpll + */ +enum dpll_mode { + DPLL_MODE_MANUAL = 1, + DPLL_MODE_AUTOMATIC, + + /* private: */ + __DPLL_MODE_MAX, + DPLL_MODE_MAX = (__DPLL_MODE_MAX - 1) +}; + +/** + * enum dpll_lock_status - provides information of dpll device lock status, + * valid values for DPLL_A_LOCK_STATUS attribute + * @DPLL_LOCK_STATUS_UNLOCKED: dpll was not yet locked to any valid input (or + * forced by setting DPLL_A_MODE to DPLL_MODE_DETACHED) + * @DPLL_LOCK_STATUS_LOCKED: dpll is locked to a valid signal, but no holdover + * available + * @DPLL_LOCK_STATUS_LOCKED_HO_ACQ: dpll is locked and holdover acquired + * @DPLL_LOCK_STATUS_HOLDOVER: dpll is in holdover state - lost a valid lock or + * was forced by disconnecting all the pins (latter possible only when dpll + * lock-state was already DPLL_LOCK_STATUS_LOCKED_HO_ACQ, if dpll lock-state + * was not DPLL_LOCK_STATUS_LOCKED_HO_ACQ, the dpll's lock-state shall remain + * DPLL_LOCK_STATUS_UNLOCKED) + */ +enum dpll_lock_status { + DPLL_LOCK_STATUS_UNLOCKED = 1, + DPLL_LOCK_STATUS_LOCKED, + DPLL_LOCK_STATUS_LOCKED_HO_ACQ, + DPLL_LOCK_STATUS_HOLDOVER, + + /* private: */ + __DPLL_LOCK_STATUS_MAX, + DPLL_LOCK_STATUS_MAX = (__DPLL_LOCK_STATUS_MAX - 1) +}; + +#define DPLL_TEMP_DIVIDER 1000 + +/** + * enum dpll_type - type of 
dpll, valid values for DPLL_A_TYPE attribute + * @DPLL_TYPE_PPS: dpll produces Pulse-Per-Second signal + * @DPLL_TYPE_EEC: dpll drives the Ethernet Equipment Clock + */ +enum dpll_type { + DPLL_TYPE_PPS = 1, + DPLL_TYPE_EEC, + + /* private: */ + __DPLL_TYPE_MAX, + DPLL_TYPE_MAX = (__DPLL_TYPE_MAX - 1) +}; + +/** + * enum dpll_pin_type - defines possible types of a pin, valid values for + * DPLL_A_PIN_TYPE attribute + * @DPLL_PIN_TYPE_MUX: aggregates another layer of selectable pins + * @DPLL_PIN_TYPE_EXT: external input + * @DPLL_PIN_TYPE_SYNCE_ETH_PORT: ethernet port PHY's recovered clock + * @DPLL_PIN_TYPE_INT_OSCILLATOR: device internal oscillator + * @DPLL_PIN_TYPE_GNSS: GNSS recovered clock + */ +enum dpll_pin_type { + DPLL_PIN_TYPE_MUX = 1, + DPLL_PIN_TYPE_EXT, + DPLL_PIN_TYPE_SYNCE_ETH_PORT, + DPLL_PIN_TYPE_INT_OSCILLATOR, + DPLL_PIN_TYPE_GNSS, + + /* private: */ + __DPLL_PIN_TYPE_MAX, + DPLL_PIN_TYPE_MAX = (__DPLL_PIN_TYPE_MAX - 1) +}; + +/** + * enum dpll_pin_direction - defines possible directions of a pin, valid values + * for DPLL_A_PIN_DIRECTION attribute + * @DPLL_PIN_DIRECTION_INPUT: pin used as an input of a signal + * @DPLL_PIN_DIRECTION_OUTPUT: pin used to output the signal + */ +enum dpll_pin_direction { + DPLL_PIN_DIRECTION_INPUT = 1, + DPLL_PIN_DIRECTION_OUTPUT, + + /* private: */ + __DPLL_PIN_DIRECTION_MAX, + DPLL_PIN_DIRECTION_MAX = (__DPLL_PIN_DIRECTION_MAX - 1) +}; + +#define DPLL_PIN_FREQUENCY_1_HZ 1 +#define DPLL_PIN_FREQUENCY_10_KHZ 10000 +#define DPLL_PIN_FREQUENCY_77_5_KHZ 77500 +#define DPLL_PIN_FREQUENCY_10_MHZ 10000000 + +/** + * enum dpll_pin_state - defines possible states of a pin, valid values for + * DPLL_A_PIN_STATE attribute + * @DPLL_PIN_STATE_CONNECTED: pin connected, active input of phase locked loop + * @DPLL_PIN_STATE_DISCONNECTED: pin disconnected, not considered as a valid + * input + * @DPLL_PIN_STATE_SELECTABLE: pin enabled for automatic input selection + */ +enum dpll_pin_state { + DPLL_PIN_STATE_CONNECTED = 1, + DPLL_PIN_STATE_DISCONNECTED, + DPLL_PIN_STATE_SELECTABLE, + + /* private: */ + __DPLL_PIN_STATE_MAX, + DPLL_PIN_STATE_MAX = (__DPLL_PIN_STATE_MAX - 1) +}; + +/** + * enum dpll_pin_capabilities - defines possible capabilities of a pin, valid + * flags on DPLL_A_PIN_CAPABILITIES attribute + * @DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE: pin direction can be changed + * @DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE: pin priority can be changed + * @DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE: pin state can be changed + */ +enum dpll_pin_capabilities { + DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE = 1, + DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE = 2, + DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE = 4, +}; + +#define DPLL_PHASE_OFFSET_DIVIDER 1000 + +enum dpll_a { + DPLL_A_ID = 1, + DPLL_A_MODULE_NAME, + DPLL_A_PAD, + DPLL_A_CLOCK_ID, + DPLL_A_MODE, + DPLL_A_MODE_SUPPORTED, + DPLL_A_LOCK_STATUS, + DPLL_A_TEMP, + DPLL_A_TYPE, + + __DPLL_A_MAX, + DPLL_A_MAX = (__DPLL_A_MAX - 1) +}; + +enum dpll_a_pin { + DPLL_A_PIN_ID = 1, + DPLL_A_PIN_PARENT_ID, + DPLL_A_PIN_MODULE_NAME, + DPLL_A_PIN_PAD, + DPLL_A_PIN_CLOCK_ID, + DPLL_A_PIN_BOARD_LABEL, + DPLL_A_PIN_PANEL_LABEL, + DPLL_A_PIN_PACKAGE_LABEL, + DPLL_A_PIN_TYPE, + DPLL_A_PIN_DIRECTION, + DPLL_A_PIN_FREQUENCY, + DPLL_A_PIN_FREQUENCY_SUPPORTED, + DPLL_A_PIN_FREQUENCY_MIN, + DPLL_A_PIN_FREQUENCY_MAX, + DPLL_A_PIN_PRIO, + DPLL_A_PIN_STATE, + DPLL_A_PIN_CAPABILITIES, + DPLL_A_PIN_PARENT_DEVICE, + DPLL_A_PIN_PARENT_PIN, + DPLL_A_PIN_PHASE_ADJUST_MIN, + DPLL_A_PIN_PHASE_ADJUST_MAX, + DPLL_A_PIN_PHASE_ADJUST, +
DPLL_A_PIN_PHASE_OFFSET, + DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET, + + __DPLL_A_PIN_MAX, + DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1) +}; + +enum dpll_cmd { + DPLL_CMD_DEVICE_ID_GET = 1, + DPLL_CMD_DEVICE_GET, + DPLL_CMD_DEVICE_SET, + DPLL_CMD_DEVICE_CREATE_NTF, + DPLL_CMD_DEVICE_DELETE_NTF, + DPLL_CMD_DEVICE_CHANGE_NTF, + DPLL_CMD_PIN_ID_GET, + DPLL_CMD_PIN_GET, + DPLL_CMD_PIN_SET, + DPLL_CMD_PIN_CREATE_NTF, + DPLL_CMD_PIN_DELETE_NTF, + DPLL_CMD_PIN_CHANGE_NTF, + + __DPLL_CMD_MAX, + DPLL_CMD_MAX = (__DPLL_CMD_MAX - 1) +}; + +#define DPLL_MCGRP_MONITOR "monitor" + +#endif /* _UAPI_LINUX_DPLL_H */ diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index ee0bcff14b69..9417309b7230 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -140,7 +140,7 @@ typedef __s64 Elf64_Sxword; #define ELF64_ST_BIND(x) ELF_ST_BIND(x) #define ELF64_ST_TYPE(x) ELF_ST_TYPE(x) -typedef struct dynamic { +typedef struct { Elf32_Sword d_tag; union { Elf32_Sword d_val; @@ -445,6 +445,8 @@ typedef struct elf64_shdr { #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ #define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */ #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ +#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */ +#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */ #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */ #define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */ #define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index f7fba0dc87e5..06ef6b78b7de 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1266,6 +1266,8 @@ struct ethtool_rxfh_indir { * hardware hash key. * @hfunc: Defines the current RSS hash function used by HW (or to be set to). * Valid values are one of the %ETH_RSS_HASH_*. + * @input_xfrm: Defines how the input data is transformed. Valid values are one + * of %RXH_XFRM_*. * @rsvd8: Reserved for future use; see the note on reserved space. * @rsvd32: Reserved for future use; see the note on reserved space. * @rss_config: RX ring/queue index for each hash value i.e., indirection table @@ -1285,7 +1287,8 @@ struct ethtool_rxfh { __u32 indir_size; __u32 key_size; __u8 hfunc; - __u8 rsvd8[3]; + __u8 input_xfrm; + __u8 rsvd8[2]; __u32 rsvd32; __u32 rss_config[]; }; @@ -1992,6 +1995,15 @@ static inline int ethtool_validate_duplex(__u8 duplex) #define WOL_MODE_COUNT 8 +/* RSS hash function data + * XOR the corresponding source and destination fields of each specified + * protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH + * calculation. Note that this XORing reduces the input set entropy and could + * be exploited to reduce the RSS queue spread. + */ +#define RXH_XFRM_SYM_XOR (1 << 0) +#define RXH_XFRM_NO_CHANGE 0xff + /* L2-L4 network traffic flow types */ #define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ #define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ @@ -2128,18 +2140,6 @@ enum ethtool_reset_flags { * refused. For drivers: ignore this field (use kernel's * __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will * be overwritten by kernel. - * @supported: Bitmap with each bit meaning given by - * %ethtool_link_mode_bit_indices for the link modes, physical - * connectors and other link features for which the interface - * supports autonegotiation or auto-detection. Read-only. 
- * @advertising: Bitmap with each bit meaning given by - * %ethtool_link_mode_bit_indices for the link modes, physical - * connectors and other link features that are advertised through - * autonegotiation or enabled for auto-detection. - * @lp_advertising: Bitmap with each bit meaning given by - * %ethtool_link_mode_bit_indices for the link modes, and other - * link features that the link partner advertised through - * autonegotiation; 0 if unknown or not applicable. Read-only. * @transceiver: Used to distinguish different possible PHY types, * reported consistently by PHYLIB. Read-only. * @master_slave_cfg: Master/slave port mode. @@ -2181,6 +2181,21 @@ enum ethtool_reset_flags { * %set_link_ksettings() should validate all fields other than @cmd * and @link_mode_masks_nwords that are not described as read-only or * deprecated, and must ignore all fields described as read-only. + * + * @link_mode_masks is divided into three bitfields, each of length + * @link_mode_masks_nwords: + * - supported: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, physical + * connectors and other link features for which the interface + * supports autonegotiation or auto-detection. Read-only. + * - advertising: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, physical + * connectors and other link features that are advertised through + * autonegotiation or enabled for auto-detection. + * - lp_advertising: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, and other + * link features that the link partner advertised through + * autonegotiation; 0 if unknown or not applicable. Read-only. */ struct ethtool_link_settings { __u32 cmd; diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 73e2c10dc2cc..3f89074aa06c 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -908,6 +908,7 @@ enum { ETHTOOL_A_RSS_HFUNC, /* u32 */ ETHTOOL_A_RSS_INDIR, /* binary */ ETHTOOL_A_RSS_HKEY, /* binary */ + ETHTOOL_A_RSS_INPUT_XFRM, /* u32 */ __ETHTOOL_A_RSS_CNT, ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1), diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index 6c80f96049bd..282e90aeb163 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -116,5 +116,8 @@ #define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to compare object identity and may not be usable to open_by_handle_at(2) */ +#if defined(__KERNEL__) +#define AT_GETATTR_NOSEC 0x80000000 +#endif #endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index b7b56871029c..48ad69f7722e 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -305,4 +305,64 @@ typedef int __bitwise __kernel_rwf_t; #define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ RWF_APPEND) +/* Pagemap ioctl */ +#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg) + +/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. 
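Back in ethtool.h, the three-bitfield layout of @link_mode_masks documented above can be indexed like this (a minimal sketch; ls is assumed to point at a struct ethtool_link_settings whose link_mode_masks_nwords the kernel has already filled in):

	#include <linux/ethtool.h>

	/* Each bitfield is link_mode_masks_nwords u32 words long, laid out
	 * back to back: supported, then advertising, then lp_advertising. */
	const __u32 *supported      = ls->link_mode_masks;
	const __u32 *advertising    = supported + ls->link_mode_masks_nwords;
	const __u32 *lp_advertising = advertising + ls->link_mode_masks_nwords;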
*/ +#define PAGE_IS_WPALLOWED (1 << 0) +#define PAGE_IS_WRITTEN (1 << 1) +#define PAGE_IS_FILE (1 << 2) +#define PAGE_IS_PRESENT (1 << 3) +#define PAGE_IS_SWAPPED (1 << 4) +#define PAGE_IS_PFNZERO (1 << 5) +#define PAGE_IS_HUGE (1 << 6) +#define PAGE_IS_SOFT_DIRTY (1 << 7) + +/* + * struct page_region - Page region with flags + * @start: Start of the region + * @end: End of the region (exclusive) + * @categories: PAGE_IS_* category bitmask for the region + */ +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +/* Flags for PAGEMAP_SCAN ioctl */ +#define PM_SCAN_WP_MATCHING (1 << 0) /* Write protect the pages matched. */ +#define PM_SCAN_CHECK_WPASYNC (1 << 1) /* Abort the scan when a non-WP-enabled page is found. */ + +/* + * struct pm_scan_arg - Pagemap ioctl argument + * @size: Size of the structure + * @flags: Flags for the IOCTL + * @start: Starting address of the region + * @end: Ending address of the region + * @walk_end: Address where the scan stopped (written by kernel). + * walk_end == end (address tags cleared) informs that the scan completed on the entire range. + * @vec: Address of page_region struct array for output + * @vec_len: Length of the page_region struct array + * @max_pages: Optional limit for number of returned pages (0 = disabled) + * @category_inverted: PAGE_IS_* categories whose values match if 0 instead of 1 + * @category_mask: Skip pages for which any category doesn't match + * @category_anyof_mask: Skip pages for which no category matches + * @return_mask: PAGE_IS_* categories that are to be reported in `page_region`s returned + */ +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + __u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + #endif /* _UAPI_LINUX_FS_H */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index fd1fb0d5389d..7a8f4c290187 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -71,7 +71,8 @@ struct fscrypt_policy_v2 { __u8 contents_encryption_mode; __u8 filenames_encryption_mode; __u8 flags; - __u8 __reserved[4]; + __u8 log2_data_unit_size; + __u8 __reserved[3]; __u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; }; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index b3fcab13fcd3..e7418d15fe39 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -207,6 +207,10 @@ * - add FUSE_EXT_GROUPS * - add FUSE_CREATE_SUPP_GROUP * - add FUSE_HAS_EXPIRE_ONLY + * + * 7.39 + * - add FUSE_DIRECT_IO_ALLOW_MMAP + * - add FUSE_STATX and related structures */ #ifndef _LINUX_FUSE_H @@ -242,7 +246,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 38 +#define FUSE_KERNEL_MINOR_VERSION 39 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -269,6 +273,40 @@ struct fuse_attr { uint32_t flags; }; +/* + * The following structures are bit-for-bit compatible with the statx(2) ABI in + * Linux.
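As a usage illustration of the PAGEMAP_SCAN ioctl and struct pm_scan_arg defined above, a minimal sketch (assuming, as an editorial reading of the interface, that the ioctl returns the number of filled page_region entries on success):

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	/* Report which parts of [start, start + len) are backed by present pages. */
	static int scan_present(unsigned long start, unsigned long len)
	{
		struct page_region regions[16];
		struct pm_scan_arg arg = {
			.size = sizeof(arg),
			.start = start,
			.end = start + len,
			.vec = (__u64)(uintptr_t)regions,
			.vec_len = 16,
			.category_mask = PAGE_IS_PRESENT, /* only pages that are present */
			.return_mask = PAGE_IS_PRESENT,   /* report that category */
		};
		int fd = open("/proc/self/pagemap", O_RDONLY);
		int n;

		if (fd < 0)
			return -1;
		n = ioctl(fd, PAGEMAP_SCAN, &arg); /* filled regions, or < 0 on error */
		close(fd);
		return n;
	}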
+ */ +struct fuse_sx_time { + int64_t tv_sec; + uint32_t tv_nsec; + int32_t __reserved; +}; + +struct fuse_statx { + uint32_t mask; + uint32_t blksize; + uint64_t attributes; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint16_t mode; + uint16_t __spare0[1]; + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t attributes_mask; + struct fuse_sx_time atime; + struct fuse_sx_time btime; + struct fuse_sx_time ctime; + struct fuse_sx_time mtime; + uint32_t rdev_major; + uint32_t rdev_minor; + uint32_t dev_major; + uint32_t dev_minor; + uint64_t __spare2[14]; +}; + struct fuse_kstatfs { uint64_t blocks; uint64_t bfree; @@ -371,6 +409,7 @@ struct fuse_file_lock { * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir, * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation + * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -409,6 +448,10 @@ struct fuse_file_lock { #define FUSE_HAS_INODE_DAX (1ULL << 33) #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) +#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) + +/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ +#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP /** * CUSE INIT request/reply flags @@ -575,6 +618,7 @@ enum fuse_opcode { FUSE_REMOVEMAPPING = 49, FUSE_SYNCFS = 50, FUSE_TMPFILE = 51, + FUSE_STATX = 52, /* CUSE specific operations */ CUSE_INIT = 4096, @@ -639,6 +683,22 @@ struct fuse_attr_out { struct fuse_attr attr; }; +struct fuse_statx_in { + uint32_t getattr_flags; + uint32_t reserved; + uint64_t fh; + uint32_t sx_flags; + uint32_t sx_mask; +}; + +struct fuse_statx_out { + uint64_t attr_valid; /* Cache timeout for the attributes */ + uint32_t attr_valid_nsec; + uint32_t flags; + uint64_t spare[2]; + struct fuse_statx stat; +}; + #define FUSE_COMPAT_MKNOD_IN_SIZE 8 struct fuse_mknod_in { diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index 71a5df8d2689..d2ee625ea189 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -44,10 +44,35 @@ FUTEX_PRIVATE_FLAG) /* - * Flags to specify the bit length of the futex word for futex2 syscalls. - * Currently, only 32 is supported. + * Flags for futex2 syscalls. 
+ * + * NOTE: these are not pure flags, they can also be seen as: + * + * union { + * u32 flags; + * struct { + * u32 size : 2, + * numa : 1, + * : 4, + * private : 1; + * }; + * }; */ -#define FUTEX_32 2 +#define FUTEX2_SIZE_U8 0x00 +#define FUTEX2_SIZE_U16 0x01 +#define FUTEX2_SIZE_U32 0x02 +#define FUTEX2_SIZE_U64 0x03 +#define FUTEX2_NUMA 0x04 + /* 0x08 */ + /* 0x10 */ + /* 0x20 */ + /* 0x40 */ +#define FUTEX2_PRIVATE FUTEX_PRIVATE_FLAG + +#define FUTEX2_SIZE_MASK 0x03 + +/* do not use */ +#define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */ /* * Max numbers of elements in a futex_waitv array diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h index 4c878d84dbda..3a93f17ca943 100644 --- a/include/uapi/linux/gsmmux.h +++ b/include/uapi/linux/gsmmux.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (c) 2022/23 Siemens Mobility GmbH */ #ifndef _LINUX_GSMMUX_H #define _LINUX_GSMMUX_H diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index 2f61298a7b77..3dcdb9e33cba 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -33,6 +33,6 @@ enum gtp_attrs { GTPA_PAD, __GTPA_MAX, }; -#define GTPA_MAX (__GTPA_MAX + 1) +#define GTPA_MAX (__GTPA_MAX - 1) #endif /* _UAPI_LINUX_GTP_H_ */ diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h index 74a8609fcb4d..0af23ec196d8 100644 --- a/include/uapi/linux/hash_info.h +++ b/include/uapi/linux/hash_info.h @@ -35,6 +35,9 @@ enum hash_algo { HASH_ALGO_SM3_256, HASH_ALGO_STREEBOG_256, HASH_ALGO_STREEBOG_512, + HASH_ALGO_SHA3_256, + HASH_ALGO_SHA3_384, + HASH_ALGO_SHA3_512, HASH_ALGO__LAST }; diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 606b52e88ce3..3d1987e1bb2d 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -31,6 +31,7 @@ enum idxd_scmd_stat { IDXD_SCMD_WQ_IRQ_ERR = 0x80100000, IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000, IDXD_SCMD_DEV_EVL_ERR = 0x80120000, + IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000, }; #define IDXD_SCMD_SOFTERR_MASK 0x80000000 diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index f95326fce6bb..a5b743a2f775 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -723,6 +723,24 @@ enum { }; #define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1) +/* [MDBA_GET_ENTRY] = { + * struct br_mdb_entry + * [MDBA_GET_ENTRY_ATTRS] = { + * [MDBE_ATTR_SOURCE] + * struct in_addr / struct in6_addr + * [MDBE_ATTR_SRC_VNI] + * u32 + * } + * } + */ +enum { + MDBA_GET_ENTRY_UNSPEC, + MDBA_GET_ENTRY, + MDBA_GET_ENTRY_ATTRS, + __MDBA_GET_ENTRY_MAX, +}; +#define MDBA_GET_ENTRY_MAX (__MDBA_GET_ENTRY_MAX - 1) + /* [MDBA_SET_ENTRY_ATTRS] = { * [MDBE_ATTR_xxx] * ... @@ -739,6 +757,7 @@ enum { MDBE_ATTR_VNI, MDBE_ATTR_IFINDEX, MDBE_ATTR_SRC_VNI, + MDBE_ATTR_STATE_MASK, __MDBE_ATTR_MAX, }; #define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ce3117df9cec..ab9bcff96e4d 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -376,7 +376,7 @@ enum { IFLA_GSO_IPV4_MAX_SIZE, IFLA_GRO_IPV4_MAX_SIZE, - + IFLA_DPLL_PIN, __IFLA_MAX }; @@ -461,6 +461,286 @@ enum in6_addr_gen_mode { /* Bridge section */ +/** + * DOC: Bridge enum definition + * + * Please *note* that the timer values in the following section are expected + * in clock_t format, which is seconds multiplied by USER_HZ (generally + * defined as 100). 
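A small helper makes the clock_t convention just described explicit (a minimal sketch; sysconf(_SC_CLK_TCK) is the standard userspace way to obtain USER_HZ):

	#include <unistd.h>

	/* Convert seconds to the clock_t units used by the IFLA_BR_* timer
	 * attributes below, e.g. a 15 s forward delay becomes 1500 when
	 * USER_HZ is 100. */
	static unsigned long br_secs_to_clock_t(unsigned long secs)
	{
		return secs * sysconf(_SC_CLK_TCK);
	}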
+ * + * @IFLA_BR_FORWARD_DELAY + * The bridge forwarding delay is the time spent in LISTENING state + * (before moving to LEARNING) and in LEARNING state (before moving + * to FORWARDING). Only relevant if STP is enabled. + * + * The valid values are between (2 * USER_HZ) and (30 * USER_HZ). + * The default value is (15 * USER_HZ). + * + * @IFLA_BR_HELLO_TIME + * The time between hello packets sent by the bridge, when it is a root + * bridge or a designated bridge. Only relevant if STP is enabled. + * + * The valid values are between (1 * USER_HZ) and (10 * USER_HZ). + * The default value is (2 * USER_HZ). + * + * @IFLA_BR_MAX_AGE + * The hello packet timeout is the time until another bridge in the + * spanning tree is assumed to be dead, after reception of its last hello + * message. Only relevant if STP is enabled. + * + * The valid values are between (6 * USER_HZ) and (40 * USER_HZ). + * The default value is (20 * USER_HZ). + * + * @IFLA_BR_AGEING_TIME + * Configure the bridge's FDB entries aging time. It is the time a MAC + * address will be kept in the FDB after a packet has been received from + * that address. After this time has passed, entries are cleaned up. + * Allow values outside the 802.1 standard specification for special cases: + * + * * 0 - entry never ages (all permanent) + * * 1 - entry disappears (no persistence) + * + * The default value is (300 * USER_HZ). + * + * @IFLA_BR_STP_STATE + * Turn spanning tree protocol on (*IFLA_BR_STP_STATE* > 0) or off + * (*IFLA_BR_STP_STATE* == 0) for this bridge. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_PRIORITY + * Set this bridge's spanning tree priority, used during STP root bridge + * election. + * + * The valid values are between 0 and 65535. + * + * @IFLA_BR_VLAN_FILTERING + * Turn VLAN filtering on (*IFLA_BR_VLAN_FILTERING* > 0) or off + * (*IFLA_BR_VLAN_FILTERING* == 0). When disabled, the bridge will not + * consider the VLAN tag when handling packets. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_VLAN_PROTOCOL + * Set the protocol used for VLAN filtering. + * + * The valid values are 0x8100(802.1Q) or 0x88A8(802.1AD). The default value + * is 0x8100(802.1Q). + * + * @IFLA_BR_GROUP_FWD_MASK + * The group forwarding mask. This is the bitmask that is applied to + * decide whether to forward incoming frames destined to link-local + * addresses (of the form 01:80:C2:00:00:0X). + * + * The default value is 0, which means the bridge does not forward any + * link-local frames coming on this port. + * + * @IFLA_BR_ROOT_ID + * The bridge root id, read only. + * + * @IFLA_BR_BRIDGE_ID + * The bridge id, read only. + * + * @IFLA_BR_ROOT_PORT + * The bridge root port, read only. + * + * @IFLA_BR_ROOT_PATH_COST + * The bridge root path cost, read only. + * + * @IFLA_BR_TOPOLOGY_CHANGE + * The bridge topology change, read only. + * + * @IFLA_BR_TOPOLOGY_CHANGE_DETECTED + * The bridge topology change detected, read only. + * + * @IFLA_BR_HELLO_TIMER + * The bridge hello timer, read only. + * + * @IFLA_BR_TCN_TIMER + * The bridge tcn timer, read only. + * + * @IFLA_BR_TOPOLOGY_CHANGE_TIMER + * The bridge topology change timer, read only. + * + * @IFLA_BR_GC_TIMER + * The bridge gc timer, read only. + * + * @IFLA_BR_GROUP_ADDR + * Set the MAC address of the multicast group this bridge uses for STP. + * The address must be a link-local address in standard Ethernet MAC address + * format. It is an address of the form 01:80:C2:00:00:0X, with X in [0, 4..f]. + * + * The default value is 0. 
+ * + * @IFLA_BR_FDB_FLUSH + * Flush bridge's fdb dynamic entries. + * + * @IFLA_BR_MCAST_ROUTER + * Set bridge's multicast router if IGMP snooping is enabled. + * The valid values are: + * + * * 0 - disabled. + * * 1 - automatic (queried). + * * 2 - permanently enabled. + * + * The default value is 1. + * + * @IFLA_BR_MCAST_SNOOPING + * Turn multicast snooping on (*IFLA_BR_MCAST_SNOOPING* > 0) or off + * (*IFLA_BR_MCAST_SNOOPING* == 0). + * + * The default value is 1. + * + * @IFLA_BR_MCAST_QUERY_USE_IFADDR + * If enabled, use the bridge's own IP address as source address for IGMP + * queries (*IFLA_BR_MCAST_QUERY_USE_IFADDR* > 0) or the default of 0.0.0.0 + * (*IFLA_BR_MCAST_QUERY_USE_IFADDR* == 0). + * + * The default value is 0 (disabled). + * + * @IFLA_BR_MCAST_QUERIER + * Enable (*IFLA_BR_MCAST_QUERIER* > 0) or disable + * (*IFLA_BR_MCAST_QUERIER* == 0) the IGMP querier, i.e. sending of multicast + * queries by the bridge. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_MCAST_HASH_ELASTICITY + * Set the multicast database hash elasticity. It is the maximum chain length in + * the multicast hash table. This attribute is *deprecated* and the value + * is always 16. + * + * @IFLA_BR_MCAST_HASH_MAX + * Set the maximum size of the multicast hash table. + * + * The default value is 4096; the value must be a power of 2. + * + * @IFLA_BR_MCAST_LAST_MEMBER_CNT + * The Last Member Query Count is the number of Group-Specific Queries + * sent before the router assumes there are no local members. The Last + * Member Query Count is also the number of Group-and-Source-Specific + * Queries sent before the router assumes there are no listeners for a + * particular source. + * + * The default value is 2. + * + * @IFLA_BR_MCAST_STARTUP_QUERY_CNT + * The Startup Query Count is the number of Queries sent out on startup, + * separated by the Startup Query Interval. + * + * The default value is 2. + * + * @IFLA_BR_MCAST_LAST_MEMBER_INTVL + * The Last Member Query Interval is the Max Response Time inserted into + * Group-Specific Queries sent in response to Leave Group messages, and + * is also the amount of time between Group-Specific Query messages. + * + * The default value is (1 * USER_HZ). + * + * @IFLA_BR_MCAST_MEMBERSHIP_INTVL + * The interval after which the bridge will leave a group, if no membership + * reports for this group are received. + * + * The default value is (260 * USER_HZ). + * + * @IFLA_BR_MCAST_QUERIER_INTVL + * The interval between queries sent by other routers. If no queries are + * seen after this delay has passed, the bridge will start to send its own + * queries (as if *IFLA_BR_MCAST_QUERIER* was enabled). + * + * The default value is (255 * USER_HZ). + * + * @IFLA_BR_MCAST_QUERY_INTVL + * The Query Interval is the interval between General Queries sent by + * the Querier. + * + * The default value is (125 * USER_HZ). The minimum value is (1 * USER_HZ). + * + * @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL + * The Max Response Time used to calculate the Max Resp Code inserted + * into the periodic General Queries. + * + * The default value is (10 * USER_HZ). + * + * @IFLA_BR_MCAST_STARTUP_QUERY_INTVL + * The interval between queries in the startup phase. + * + * The default value is (125 * USER_HZ) / 4. The minimum value is (1 * USER_HZ). + * + * @IFLA_BR_NF_CALL_IPTABLES + * Enable (*NF_CALL_IPTABLES* > 0) or disable (*NF_CALL_IPTABLES* == 0) + * iptables hooks on the bridge. + * + * The default value is 0 (disabled).
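As an editorial cross-check, the multicast timer defaults listed above appear consistent with the IGMP timing relation from RFC 2236 (not part of the header itself):

	/* Group Membership Interval = robustness * Query Interval
	 *                             + Query Response Interval
	 *                           = 2 * (125 * USER_HZ) + (10 * USER_HZ)
	 *                           = 260 * USER_HZ
	 *
	 * which matches the IFLA_BR_MCAST_MEMBERSHIP_INTVL default; the
	 * robustness value of 2 likewise matches the defaults of
	 * IFLA_BR_MCAST_LAST_MEMBER_CNT and IFLA_BR_MCAST_STARTUP_QUERY_CNT. */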
+ * + * @IFLA_BR_NF_CALL_IP6TABLES + * Enable (*NF_CALL_IP6TABLES* > 0) or disable (*NF_CALL_IP6TABLES* == 0) + * ip6tables hooks on the bridge. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_NF_CALL_ARPTABLES + * Enable (*NF_CALL_ARPTABLES* > 0) or disable (*NF_CALL_ARPTABLES* == 0) + * arptables hooks on the bridge. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_VLAN_DEFAULT_PVID + * VLAN ID applied to untagged and priority-tagged incoming packets. + * + * The default value is 1. Setting to the special value 0 makes all ports of + * this bridge not have a PVID by default, which means that they will + * not accept VLAN-untagged traffic. + * + * @IFLA_BR_PAD + * Bridge attribute padding type for netlink message. + * + * @IFLA_BR_VLAN_STATS_ENABLED + * Enable (*IFLA_BR_VLAN_STATS_ENABLED* == 1) or disable + * (*IFLA_BR_VLAN_STATS_ENABLED* == 0) per-VLAN stats accounting. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_MCAST_STATS_ENABLED + * Enable (*IFLA_BR_MCAST_STATS_ENABLED* > 0) or disable + * (*IFLA_BR_MCAST_STATS_ENABLED* == 0) multicast (IGMP/MLD) stats + * accounting. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_MCAST_IGMP_VERSION + * Set the IGMP version. + * + * The valid values are 2 and 3. The default value is 2. + * + * @IFLA_BR_MCAST_MLD_VERSION + * Set the MLD version. + * + * The valid values are 1 and 2. The default value is 1. + * + * @IFLA_BR_VLAN_STATS_PER_PORT + * Enable (*IFLA_BR_VLAN_STATS_PER_PORT* == 1) or disable + * (*IFLA_BR_VLAN_STATS_PER_PORT* == 0) per-VLAN per-port stats accounting. + * Can be changed only when there are no port VLANs configured. + * + * The default value is 0 (disabled). + * + * @IFLA_BR_MULTI_BOOLOPT + * The multi_boolopt is used to control new boolean options to avoid adding + * new netlink attributes. You can look at ``enum br_boolopt_id`` for those + * options. + * + * @IFLA_BR_MCAST_QUERIER_STATE + * Bridge mcast querier states, read only. + * + * @IFLA_BR_FDB_N_LEARNED + * The number of dynamically learned FDB entries for the current bridge, + * read only. + * + * @IFLA_BR_FDB_MAX_LEARNED + * Set the number of max dynamically learned FDB entries for the current + * bridge. + */ enum { IFLA_BR_UNSPEC, IFLA_BR_FORWARD_DELAY, @@ -510,6 +790,8 @@ enum { IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, IFLA_BR_MCAST_QUERIER_STATE, + IFLA_BR_FDB_N_LEARNED, + IFLA_BR_FDB_MAX_LEARNED, __IFLA_BR_MAX, }; @@ -520,11 +802,252 @@ struct ifla_bridge_id { __u8 addr[6]; /* ETH_ALEN */ }; +/** + * DOC: Bridge mode enum definition + * + * @BRIDGE_MODE_HAIRPIN + * Controls whether traffic may be sent back out of the port on which it + * was received. This option is also called reflective relay mode, and is + * used to support basic VEPA (Virtual Ethernet Port Aggregator) + * capabilities. By default, this flag is turned off and the bridge will + * not forward traffic back out of the receiving port. + */ enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, }; +/** + * DOC: Bridge port enum definition + * + * @IFLA_BRPORT_STATE + * The operation state of the port. Here are the valid values. + * + * * 0 - port is in STP *DISABLED* state. Make this port completely + * inactive for STP. This is also called BPDU filter and could be used + * to disable STP on an untrusted port, like a leaf virtual device. + * The traffic forwarding is also stopped on this port. + * * 1 - port is in STP *LISTENING* state. Only valid if STP is enabled + * on the bridge. 
In this state the port listens for STP BPDUs and + * drops all other traffic frames. + * * 2 - port is in STP *LEARNING* state. Only valid if STP is enabled on + * the bridge. In this state the port will accept traffic only for the + * purpose of updating MAC address tables. + * * 3 - port is in STP *FORWARDING* state. Port is fully active. + * * 4 - port is in STP *BLOCKING* state. Only valid if STP is enabled on + * the bridge. This state is used during the STP election process. + * In this state, port will only process STP BPDUs. + * + * @IFLA_BRPORT_PRIORITY + * The STP port priority. The valid values are between 0 and 255. + * + * @IFLA_BRPORT_COST + * The STP path cost of the port. The valid values are between 1 and 65535. + * + * @IFLA_BRPORT_MODE + * Set the bridge port mode. See *BRIDGE_MODE_HAIRPIN* for more details. + * + * @IFLA_BRPORT_GUARD + * Controls whether STP BPDUs will be processed by the bridge port. By + * default, the flag is turned off to allow BPDU processing. Turning this + * flag on will disable the bridge port if a STP BPDU packet is received. + * + * If the bridge has Spanning Tree enabled, hostile devices on the network + * may send BPDU on a port and cause network failure. Setting *guard on* + * will detect and stop this by disabling the port. The port will be + * restarted if the link is brought down, or removed and reattached. + * + * @IFLA_BRPORT_PROTECT + * Controls whether a given port is allowed to become a root port or not. + * Only used when STP is enabled on the bridge. By default the flag is off. + * + * This feature is also called root port guard. If BPDU is received from a + * leaf (edge) port, it should not be elected as root port. This could + * be used if using STP on a bridge and the downstream bridges are not fully + * trusted; this prevents a hostile guest from rerouting traffic. + * + * @IFLA_BRPORT_FAST_LEAVE + * This flag allows the bridge to immediately stop multicast traffic + * forwarding on a port that receives an IGMP Leave message. It is only used + * when IGMP snooping is enabled on the bridge. By default the flag is off. + * + * @IFLA_BRPORT_LEARNING + * Controls whether a given port will learn *source* MAC addresses from + * received traffic or not. Also controls whether dynamic FDB entries + * (which can also be added by software) will be refreshed by incoming + * traffic. By default this flag is on. + * + * @IFLA_BRPORT_UNICAST_FLOOD + * Controls whether unicast traffic for which there is no FDB entry will + * be flooded towards this port. By default this flag is on. + * + * @IFLA_BRPORT_PROXYARP + * Enable proxy ARP on this port. + * + * @IFLA_BRPORT_LEARNING_SYNC + * Controls whether a given port will sync MAC addresses learned on device + * port to bridge FDB. + * + * @IFLA_BRPORT_PROXYARP_WIFI + * Enable proxy ARP on this port which meets extended requirements by + * IEEE 802.11 and Hotspot 2.0 specifications. + * + * @IFLA_BRPORT_ROOT_ID + * + * @IFLA_BRPORT_BRIDGE_ID + * + * @IFLA_BRPORT_DESIGNATED_PORT + * + * @IFLA_BRPORT_DESIGNATED_COST + * + * @IFLA_BRPORT_ID + * + * @IFLA_BRPORT_NO + * + * @IFLA_BRPORT_TOPOLOGY_CHANGE_ACK + * + * @IFLA_BRPORT_CONFIG_PENDING + * + * @IFLA_BRPORT_MESSAGE_AGE_TIMER + * + * @IFLA_BRPORT_FORWARD_DELAY_TIMER + * + * @IFLA_BRPORT_HOLD_TIMER + * + * @IFLA_BRPORT_FLUSH + * Flush bridge ports' fdb dynamic entries. + * + * @IFLA_BRPORT_MULTICAST_ROUTER + * Configure the port's multicast router presence. A port with + * a multicast router will receive all multicast traffic. 
+ * The valid values are: + * + * * 0 disable multicast routers on this port + * * 1 let the system detect the presence of routers (default) + * * 2 permanently enable multicast traffic forwarding on this port + * * 3 enable multicast routers temporarily on this port, not depending + * on incoming queries. + * + * @IFLA_BRPORT_PAD + * + * @IFLA_BRPORT_MCAST_FLOOD + * Controls whether a given port will flood multicast traffic for which + * there is no MDB entry. By default this flag is on. + * + * @IFLA_BRPORT_MCAST_TO_UCAST + * Controls whether a given port will replicate packets using unicast + * instead of multicast. By default this flag is off. + * + * This is done by copying the packet per host and changing the multicast + * destination MAC to a unicast one accordingly. + * + * *mcast_to_unicast* works on top of the multicast snooping feature of the + * bridge. Which means unicast copies are only delivered to hosts which + * are interested in unicast and signaled this via IGMP/MLD reports previously. + * + * This feature is intended for interface types which have a more reliable + * and/or efficient way to deliver unicast packets than broadcast ones + * (e.g. WiFi). + * + * However, it should only be enabled on interfaces where no IGMPv2/MLDv1 + * report suppression takes place. IGMP/MLD report suppression issue is + * usually overcome by the network daemon (supplicant) enabling AP isolation + * and by that separating all STAs. + * + * Delivery of STA-to-STA IP multicast is made possible again by enabling + * and utilizing the bridge hairpin mode, which considers the incoming port + * as a potential outgoing port, too (see *BRIDGE_MODE_HAIRPIN* option). + * Hairpin mode is performed after multicast snooping, therefore leading + * to only deliver reports to STAs running a multicast router. + * + * @IFLA_BRPORT_VLAN_TUNNEL + * Controls whether vlan to tunnel mapping is enabled on the port. + * By default this flag is off. + * + * @IFLA_BRPORT_BCAST_FLOOD + * Controls flooding of broadcast traffic on the given port. By default + * this flag is on. + * + * @IFLA_BRPORT_GROUP_FWD_MASK + * Set the group forward mask. This is a bitmask that is applied to + * decide whether to forward incoming frames destined to link-local + * addresses. The addresses of the form are 01:80:C2:00:00:0X (defaults + * to 0, which means the bridge does not forward any link-local frames + * coming on this port). + * + * @IFLA_BRPORT_NEIGH_SUPPRESS + * Controls whether neighbor discovery (arp and nd) proxy and suppression + * is enabled on the port. By default this flag is off. + * + * @IFLA_BRPORT_ISOLATED + * Controls whether a given port will be isolated, which means it will be + * able to communicate with non-isolated ports only. By default this + * flag is off. + * + * @IFLA_BRPORT_BACKUP_PORT + * Set a backup port. If the port loses carrier all traffic will be + * redirected to the configured backup port. Set the value to 0 to disable + * it. + * + * @IFLA_BRPORT_MRP_RING_OPEN + * + * @IFLA_BRPORT_MRP_IN_OPEN + * + * @IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + * The number of per-port EHT hosts limit. The default value is 512. + * Setting to 0 is not allowed. + * + * @IFLA_BRPORT_MCAST_EHT_HOSTS_CNT + * The current number of tracked hosts, read only. + * + * @IFLA_BRPORT_LOCKED + * Controls whether a port will be locked, meaning that hosts behind the + * port will not be able to communicate through the port unless an FDB + * entry with the unit's MAC address is in the FDB. 
The common use case is + * that hosts are allowed access through authentication with the IEEE 802.1X + * protocol or based on whitelists. By default this flag is off. + * + * Please note that secure 802.1X deployments should always use the + * *BR_BOOLOPT_NO_LL_LEARN* flag, to not permit the bridge to populate its + * FDB based on link-local (EAPOL) traffic received on the port. + * + * @IFLA_BRPORT_MAB + * Controls whether a port will use MAC Authentication Bypass (MAB), a + * technique through which select MAC addresses may be allowed on a locked + * port, without using 802.1X authentication. Packets with an unknown source + * MAC address generate a "locked" FDB entry on the incoming bridge port. + * The common use case is for user space to react to these bridge FDB + * notifications and optionally replace the locked FDB entry with a normal + * one, allowing traffic to pass for whitelisted MAC addresses. + * + * Setting this flag also requires *IFLA_BRPORT_LOCKED* and + * *IFLA_BRPORT_LEARNING*. *IFLA_BRPORT_LOCKED* ensures that unauthorized + * data packets are dropped, and *IFLA_BRPORT_LEARNING* allows the dynamic + * FDB entries installed by user space (as replacements for the locked FDB + * entries) to be refreshed and/or aged out. + * + * @IFLA_BRPORT_MCAST_N_GROUPS + * + * @IFLA_BRPORT_MCAST_MAX_GROUPS + * Sets the maximum number of MDB entries that can be registered for a + * given port. Attempts to register more MDB entries at the port than this + * limit allows will be rejected, whether they are done through netlink + * (e.g. the bridge tool), or IGMP or MLD membership reports. Setting a + * limit of 0 disables the limit. The default value is 0. + * + * @IFLA_BRPORT_NEIGH_VLAN_SUPPRESS + * Controls whether neighbor discovery (arp and nd) proxy and suppression is + * enabled for a given port. By default this flag is off. + * + * Note that this option only takes effect when *IFLA_BRPORT_NEIGH_SUPPRESS* + * is enabled for a given port. + * + * @IFLA_BRPORT_BACKUP_NHID + * The FDB nexthop object ID to attach to packets being redirected to a + * backup port that has VLAN tunnel mapping enabled (via the + * *IFLA_BRPORT_VLAN_TUNNEL* option). Setting a value of 0 (default) has + * the effect of not attaching any ID.
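The dependency spelled out for *IFLA_BRPORT_MAB* can be captured in a one-line validity check (a hypothetical helper for illustration, not part of the uAPI):

	#include <stdbool.h>

	/* MAB is only meaningful on a locked port with learning enabled. */
	static bool brport_mab_config_valid(bool locked, bool learning, bool mab)
	{
		return !mab || (locked && learning);
	}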
+ */ enum { IFLA_BRPORT_UNSPEC, IFLA_BRPORT_STATE, /* Spanning tree state */ @@ -756,6 +1279,30 @@ struct tunnel_msg { __u32 ifindex; }; +/* netkit section */ +enum netkit_action { + NETKIT_NEXT = -1, + NETKIT_PASS = 0, + NETKIT_DROP = 2, + NETKIT_REDIRECT = 7, +}; + +enum netkit_mode { + NETKIT_L2, + NETKIT_L3, +}; + +enum { + IFLA_NETKIT_UNSPEC, + IFLA_NETKIT_PEER_INFO, + IFLA_NETKIT_PRIMARY, + IFLA_NETKIT_POLICY, + IFLA_NETKIT_PEER_POLICY, + IFLA_NETKIT_MODE, + __IFLA_NETKIT_MAX, +}; +#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) + /* VXLAN section */ /* include statistics in the dump */ @@ -830,6 +1377,7 @@ enum { IFLA_VXLAN_DF, IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ IFLA_VXLAN_LOCALBYPASS, + IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */ __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -847,6 +1395,13 @@ enum ifla_vxlan_df { VXLAN_DF_MAX = __VXLAN_DF_END - 1, }; +enum ifla_vxlan_label_policy { + VXLAN_LABEL_FIXED = 0, + VXLAN_LABEL_INHERIT = 1, + __VXLAN_LABEL_END, + VXLAN_LABEL_MAX = __VXLAN_LABEL_END - 1, +}; + /* GENEVE section */ enum { IFLA_GENEVE_UNSPEC, @@ -1392,7 +1947,9 @@ enum { enum { IFLA_DSA_UNSPEC, - IFLA_DSA_MASTER, + IFLA_DSA_CONDUIT, + /* Deprecated, use IFLA_DSA_CONDUIT instead */ + IFLA_DSA_MASTER = IFLA_DSA_CONDUIT, __IFLA_DSA_MAX, }; diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index 4d0ad22f83b5..9efc42382fdb 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -18,11 +18,7 @@ struct sockaddr_ll { unsigned short sll_hatype; unsigned char sll_pkttype; unsigned char sll_halen; - union { - unsigned char sll_addr[8]; - /* Actual length is in sll_halen. */ - __DECLARE_FLEX_ARRAY(unsigned char, sll_addr_flex); - }; + unsigned char sll_addr[8]; }; /* Packet types */ diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index 8d48863472b9..d31698410410 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -33,7 +33,13 @@ #define XDP_USE_SG (1 << 4) /* Flags for xsk_umem_config flags */ -#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) +#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) + +/* Force checksum calculation in software. Can be used for testing or + * working around potential HW issues. This option causes performance + * degradation and only works in XDP_COPY mode. + */ +#define XDP_UMEM_TX_SW_CSUM (1 << 1) struct sockaddr_xdp { __u16 sxdp_family; @@ -76,6 +82,7 @@ struct xdp_umem_reg { __u32 chunk_size; __u32 headroom; __u32 flags; + __u32 tx_metadata_len; }; struct xdp_statistics { @@ -105,6 +112,41 @@ struct xdp_options { #define XSK_UNALIGNED_BUF_ADDR_MASK \ ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1) +/* Request transmit timestamp. Upon completion, put it into tx_timestamp + * field of struct xsk_tx_metadata. + */ +#define XDP_TXMD_FLAGS_TIMESTAMP (1 << 0) + +/* Request transmit checksum offload. Checksum start position and offset + * are communicated via csum_start and csum_offset fields of struct + * xsk_tx_metadata. + */ +#define XDP_TXMD_FLAGS_CHECKSUM (1 << 1) + +/* AF_XDP offloads request. 'request' union member is consumed by the driver + * when the packet is being transmitted. 'completion' union member is + * filled by the driver when the transmit completion arrives. + */ +struct xsk_tx_metadata { + __u64 flags; + + union { + struct { + /* XDP_TXMD_FLAGS_CHECKSUM */ + + /* Offset from desc->addr where checksumming should start. 
*/ + __u16 csum_start; + /* Offset from csum_start where checksum should be stored. */ + __u16 csum_offset; + } request; + + struct { + /* XDP_TXMD_FLAGS_TIMESTAMP */ + __u64 tx_timestamp; + } completion; + }; +}; + /* Rx/Tx descriptor */ struct xdp_desc { __u64 addr; @@ -121,4 +163,7 @@ struct xdp_desc { */ #define XDP_PKT_CONTD (1 << 0) +/* TX packet carries valid metadata. */ +#define XDP_TX_METADATA (1 << 1) + #endif /* _LINUX_IF_XDP_H */ diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index c79f2f046a0b..5060963707b1 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -47,6 +47,10 @@ enum iio_chan_type { IIO_POSITIONRELATIVE, IIO_PHASE, IIO_MASSCONCENTRATION, + IIO_DELTA_ANGL, + IIO_DELTA_VELOCITY, + IIO_COLORTEMP, + IIO_CHROMATICITY, }; enum iio_modifier { @@ -87,6 +91,8 @@ enum iio_modifier { IIO_MOD_CO2, IIO_MOD_VOC, IIO_MOD_LIGHT_UV, + IIO_MOD_LIGHT_UVA, + IIO_MOD_LIGHT_UVB, IIO_MOD_LIGHT_DUV, IIO_MOD_PM1, IIO_MOD_PM2P5, diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8e61f8b7c2ce..7a673b52827b 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -43,6 +43,10 @@ struct io_uring_sqe { union { __u64 addr; /* pointer to buffer or iovecs */ __u64 splice_off_in; + struct { + __u32 level; + __u32 optname; + }; }; __u32 len; /* buffer size or number of iovecs */ union { @@ -65,6 +69,9 @@ struct io_uring_sqe { __u32 xattr_flags; __u32 msg_ring_flags; __u32 uring_cmd_flags; + __u32 waitid_flags; + __u32 futex_flags; + __u32 install_fd_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -79,6 +86,7 @@ struct io_uring_sqe { union { __s32 splice_fd_in; __u32 file_index; + __u32 optlen; struct { __u16 addr_len; __u16 __pad3[1]; @@ -89,6 +97,7 @@ struct io_uring_sqe { __u64 addr3; __u64 __pad2[1]; }; + __u64 optval; /* * If the ring is initialized with IORING_SETUP_SQE128, then * this field is used for 80 bytes of arbitrary command data @@ -240,19 +249,24 @@ enum io_uring_op { IORING_OP_URING_CMD, IORING_OP_SEND_ZC, IORING_OP_SENDMSG_ZC, + IORING_OP_READ_MULTISHOT, + IORING_OP_WAITID, + IORING_OP_FUTEX_WAIT, + IORING_OP_FUTEX_WAKE, + IORING_OP_FUTEX_WAITV, + IORING_OP_FIXED_FD_INSTALL, /* this goes last, obviously */ IORING_OP_LAST, }; /* - * sqe->uring_cmd_flags + * sqe->uring_cmd_flags top 8bits aren't available for userspace * IORING_URING_CMD_FIXED use registered buffer; pass this flag * along with setting sqe->buf_index. 
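Stepping back to the AF_XDP TX metadata added in if_xdp.h above, here is a minimal sketch of requesting checksum offload for one frame. It assumes the UMEM was registered with tx_metadata_len == sizeof(struct xsk_tx_metadata), so the metadata sits in the headroom immediately before the packet bytes; desc, umem_base and the header lengths are hypothetical context:

	/* 'desc' is this frame's struct xdp_desc from the TX ring and
	 * 'umem_base' is the mmap'ed UMEM area. */
	__u8 *pkt = umem_base + desc->addr;
	struct xsk_tx_metadata *meta =
		(struct xsk_tx_metadata *)(pkt - sizeof(*meta));

	meta->flags = XDP_TXMD_FLAGS_CHECKSUM;
	meta->request.csum_start  = 14 + 20; /* L4 start: Ethernet + IPv4 headers */
	meta->request.csum_offset = 16;      /* offsetof(struct tcphdr, check) */
	desc->options |= XDP_TX_METADATA;    /* tell the driver metadata is valid */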
- * IORING_URING_CMD_POLLED driver use only */ #define IORING_URING_CMD_FIXED (1U << 0) -#define IORING_URING_CMD_POLLED (1U << 31) +#define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED /* @@ -375,6 +389,13 @@ enum { #define IORING_MSG_RING_FLAGS_PASS (1U << 1) /* + * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags) + * + * IORING_FIXED_FD_NO_CLOEXEC Don't mark the fd as O_CLOEXEC + */ +#define IORING_FIXED_FD_NO_CLOEXEC (1U << 0) + +/* * IO completion data structure (Completion Queue Entry) */ struct io_uring_cqe { @@ -546,6 +567,9 @@ enum { /* register a range of fixed file slots for automatic slot allocation */ IORING_REGISTER_FILE_ALLOC_RANGE = 25, + /* return status information for a buffer group */ + IORING_REGISTER_PBUF_STATUS = 26, + /* this goes last */ IORING_REGISTER_LAST, @@ -672,6 +696,13 @@ struct io_uring_buf_reg { __u64 resv[3]; }; +/* argument for IORING_REGISTER_PBUF_STATUS */ +struct io_uring_buf_status { + __u32 buf_group; /* input */ + __u32 head; /* output */ + __u32 resv[8]; +}; + /* * io_uring_restriction->opcode values */ @@ -734,6 +765,8 @@ struct io_uring_recvmsg_out { enum { SOCKET_URING_OP_SIOCINQ = 0, SOCKET_URING_OP_SIOCOUTQ, + SOCKET_URING_OP_GETSOCKOPT, + SOCKET_URING_OP_SETSOCKOPT, }; #ifdef __cplusplus diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index b4ba0c0cbab6..1dfeaa2e649e 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -47,6 +47,9 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, + IOMMUFD_CMD_HWPT_INVALIDATE, }; /** @@ -348,19 +351,85 @@ struct iommu_vfio_ioas { #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) /** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. + * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment + */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, +}; + +/** + * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table + * entry attributes + * @IOMMU_VTD_S1_SRE: Supervisor request + * @IOMMU_VTD_S1_EAFE: Extended access enable + * @IOMMU_VTD_S1_WPE: Write protect enable + */ +enum iommu_hwpt_vtd_s1_flags { + IOMMU_VTD_S1_SRE = 1 << 0, + IOMMU_VTD_S1_EAFE = 1 << 1, + IOMMU_VTD_S1_WPE = 1 << 2, +}; + +/** + * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table + * info (IOMMU_HWPT_DATA_VTD_S1) + * @flags: Combination of enum iommu_hwpt_vtd_s1_flags + * @pgtbl_addr: The base address of the stage-1 page table. 
+ * @addr_width: The address width of the stage-1 page table + * @__reserved: Must be 0 + */ +struct iommu_hwpt_vtd_s1 { + __aligned_u64 flags; + __aligned_u64 pgtbl_addr; + __u32 addr_width; + __u32 __reserved; +}; + +/** + * enum iommu_hwpt_data_type - IOMMU HWPT Data Type + * @IOMMU_HWPT_DATA_NONE: no data + * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table + */ +enum iommu_hwpt_data_type { + IOMMU_HWPT_DATA_NONE, + IOMMU_HWPT_DATA_VTD_S1, +}; + +/** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for - * @pt_id: The IOAS to connect this HWPT to + * @pt_id: The IOAS or HWPT to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT * @__reserved: Must be 0 + * @data_type: One of enum iommu_hwpt_data_type + * @data_len: Length of the type specific data + * @data_uptr: User pointer to the type specific data * * Explicitly allocate a hardware page table object. This is the same object * type that is returned by iommufd_device_attach() and represents the * underlying iommu driver's iommu_domain kernel object. * - * A HWPT will be created with the IOVA mappings from the given IOAS. + * A kernel-managed HWPT will be created with the mappings from the given + * IOAS via the @pt_id. The @data_type for this allocation must be set to + * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a + * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags. + * + * A user-managed nested HWPT will be created from a given parent HWPT via + * @pt_id, in which the parent HWPT must be allocated previously via the + * same ioctl from a given IOAS (@pt_id). In this case, the @data_type + * must be set to a pre-defined type corresponding to an I/O page table + * type supported by the underlying IOMMU hardware. + * + * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and + * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr + * must be given. */ struct iommu_hwpt_alloc { __u32 size; @@ -369,13 +438,26 @@ struct iommu_hwpt_alloc { __u32 pt_id; __u32 out_hwpt_id; __u32 __reserved; + __u32 data_type; + __u32 data_len; + __aligned_u64 data_uptr; }; #define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC) /** + * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info + * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings + * on a nested_parent domain. 
https://www.intel.com/content/www/us/en/content-details/772415/content-details.html + */ +enum iommu_hw_info_vtd_flags { + IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0, +}; + +/** * struct iommu_hw_info_vtd - Intel VT-d hardware information * - * @flags: Must be 0 + * @flags: Combination of enum iommu_hw_info_vtd_flags * @__reserved: Must be 0 * * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec @@ -405,6 +487,20 @@ enum iommu_hw_info_type { }; /** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, +}; + +/** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) * @flags: Must be 0 @@ -415,6 +511,8 @@ enum iommu_hw_info_type { * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommu_hw_capabilities. * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -439,6 +537,159 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/* + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. This flag + * can be passed when the next operation + * on the same IOVA range is expected + * to be an unmap. + * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags + * @__reserved: Must be 0 + * @iova: base IOVA of the bitmap first bit + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where the dirty bits are set. Each bit in the bitmap + * represents one page_size unit of IOVA, counted from the + * @iova base.
+ * + * Checking whether a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE. + */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + +/** + * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation + * Data Type + * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1 + */ +enum iommu_hwpt_invalidate_data_type { + IOMMU_HWPT_INVALIDATE_DATA_VTD_S1, +}; + +/** + * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d + * stage-1 cache invalidation + * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies + * to all-levels page structure cache or just + * the leaf PTE cache. + */ +enum iommu_hwpt_vtd_s1_invalidate_flags { + IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0, +}; + +/** + * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation + * (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) + * @addr: The start address of the range to be invalidated. It needs to + * be 4KB aligned. + * @npages: Number of contiguous 4K pages to be invalidated. + * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags + * @__reserved: Must be 0 + * + * The Intel VT-d specific invalidation data for user-managed stage-1 cache + * invalidation in nested translation. Userspace uses this structure to + * describe the impacted cache scope after modifying the stage-1 page table. + * + * Invalidate all the caches related to the page table by setting @addr + * to 0 and @npages to U64_MAX. + * + * The device TLB will be invalidated automatically if ATS is enabled. + */ +struct iommu_hwpt_vtd_s1_invalidate { + __aligned_u64 addr; + __aligned_u64 npages; + __u32 flags; + __u32 __reserved; +}; + +/** + * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE) + * @size: sizeof(struct iommu_hwpt_invalidate) + * @hwpt_id: ID of a nested HWPT for cache invalidation + * @data_uptr: User pointer to an array of driver-specific cache invalidation + * data. + * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data + * type of all the entries in the invalidation request array. It + * should be a type supported by the hwpt pointed by @hwpt_id. + * @entry_len: Length (in bytes) of a request entry in the request array + * @entry_num: Input the number of cache invalidation requests in the array. + * Output the number of requests successfully handled by kernel. + * @__reserved: Must be 0. + * + * Invalidate the iommu cache for user-managed page table. Modifications on a + * user-managed page table should be followed by this operation to sync cache. + * Each ioctl can support one or more cache invalidation requests in the array + * that has a total size of @entry_len * @entry_num. + * + * An empty invalidation request array (@entry_num == 0) is allowed; @entry_len + * and @data_uptr are ignored in this case. This can be used to + * check whether the given @data_type is supported by the kernel.
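The dirty-bit test quoted in the iommu_hwpt_get_dirty_bitmap kerneldoc above can be wrapped as a helper (a minimal sketch; bitmap is the buffer passed via the struct's data field, and iova is taken relative to the request's @iova base):

	#include <stdbool.h>
	#include <linux/types.h>

	/* True if the page_size-sized IOVA unit containing 'iova' was dirtied. */
	static bool iova_is_dirty(const __u64 *bitmap, __u64 iova, __u64 page_size)
	{
		__u64 bit = iova / page_size;

		return bitmap[bit / 64] & (1ULL << (bit % 64));
	}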
+ */ +struct iommu_hwpt_invalidate { + __u32 size; + __u32 hwpt_id; + __aligned_u64 data_uptr; + __u32 data_type; + __u32 entry_len; + __u32 entry_num; + __u32 __reserved; +}; +#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE) #endif diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 01766dd839b0..c17bb096ea68 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -25,6 +25,7 @@ #define KEXEC_FILE_UNLOAD 0x00000001 #define KEXEC_FILE_ON_CRASH 0x00000002 #define KEXEC_FILE_NO_INITRAMFS 0x00000004 +#define KEXEC_FILE_DEBUG 0x00000008 /* These values match the ELF architecture values. * Unless there is a good reason that should continue to be the case. diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index eeb2fdcbdcb7..f0ed68974c54 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -405,6 +405,7 @@ struct kfd_ioctl_acquire_vm_args { #define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27) #define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26) #define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25) +#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24) /* Allocate memory for later SVM (shared virtual memory) mapping. * @@ -659,6 +660,8 @@ enum kfd_mmio_remap { #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 /* Keep GPU memory mapping always valid as if XNACK is disable */ #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040 +/* Fine grained coherency between all devices using device-scope atomics */ +#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080 /** * kfd_ioctl_svm_op - SVM ioctl operations diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 13065dd96132..c3308536482b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -16,76 +16,6 @@ #define KVM_API_VERSION 12 -/* *** Deprecated interfaces *** */ - -#define KVM_TRC_SHIFT 16 - -#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT) -#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) - -#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01) -#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02) -#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01) - -#define KVM_TRC_HEAD_SIZE 12 -#define KVM_TRC_CYCLE_SIZE 8 -#define KVM_TRC_EXTRA_MAX 7 - -#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) -#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) -#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) -#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) -#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) -#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) -#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) -#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) -#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) -#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) -#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) -#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) -#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) -#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) -#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) -#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) -#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) -#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) -#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) -#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) -#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16) -#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17) -#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) -#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) - -struct kvm_user_trace_setup { - __u32 buf_size; - __u32 
buf_nr; -}; - -#define __KVM_DEPRECATED_MAIN_W_0x06 \ - _IOW(KVMIO, 0x06, struct kvm_user_trace_setup) -#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07) -#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08) - -#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq) - -struct kvm_breakpoint { - __u32 enabled; - __u32 padding; - __u64 address; -}; - -struct kvm_debug_guest { - __u32 enabled; - __u32 pad; - struct kvm_breakpoint breakpoints[4]; - __u32 singlestep; -}; - -#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest) - -/* *** End of deprecated interfaces *** */ - - /* for KVM_SET_USER_MEMORY_REGION */ struct kvm_userspace_memory_region { __u32 slot; @@ -95,6 +25,19 @@ struct kvm_userspace_memory_region { __u64 userspace_addr; /* start of the userspace allocated memory */ }; +/* for KVM_SET_USER_MEMORY_REGION2 */ +struct kvm_userspace_memory_region2 { + __u32 slot; + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; + __u64 userspace_addr; + __u64 guest_memfd_offset; + __u32 guest_memfd; + __u32 pad1; + __u64 pad2[14]; +}; + /* * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for * userspace, other bits are reserved for kvm internal use which are defined @@ -102,6 +45,7 @@ struct kvm_userspace_memory_region { */ #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) #define KVM_MEM_READONLY (1UL << 1) +#define KVM_MEM_GUEST_MEMFD (1UL << 2) /* for KVM_IRQ_LINE */ struct kvm_irq_level { @@ -264,6 +208,8 @@ struct kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 +#define KVM_EXIT_MEMORY_FAULT 39 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -336,6 +282,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -510,6 +463,13 @@ struct kvm_run { #define KVM_NOTIFY_CONTEXT_INVALID (1 << 0) __u32 flags; } notify; + /* KVM_EXIT_MEMORY_FAULT */ + struct { +#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3) + __u64 flags; + __u64 gpa; + __u64 size; + } memory_fault; /* Fix the size of the union. 
*/ char padding[256]; }; @@ -937,9 +897,6 @@ struct kvm_ppc_resize_hpt { */ #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) -#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06 -#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 -#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) #define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) @@ -1192,6 +1149,12 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_COUNTER_OFFSET 227 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 +#define KVM_CAP_USER_MEMORY2 231 +#define KVM_CAP_MEMORY_FAULT_INFO 232 +#define KVM_CAP_MEMORY_ATTRIBUTES 233 +#define KVM_CAP_GUEST_MEMFD 234 +#define KVM_CAP_VM_TYPES 235 #ifdef KVM_CAP_IRQ_ROUTING @@ -1282,6 +1245,7 @@ struct kvm_x86_mce { #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6) +#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7) struct kvm_xen_hvm_config { __u32 flags; @@ -1362,6 +1326,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL @@ -1473,6 +1438,8 @@ struct kvm_vfio_spapr_tce { struct kvm_userspace_memory_region) #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) +#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \ + struct kvm_userspace_memory_region2) /* enable ucontrol for s390 */ struct kvm_s390_ucas_mapping { @@ -1497,20 +1464,8 @@ struct kvm_s390_ucas_mapping { _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) #define KVM_UNREGISTER_COALESCED_MMIO \ _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) -#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ - struct kvm_assigned_pci_dev) #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) -/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ -#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70 -#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) -#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ - struct kvm_assigned_pci_dev) -#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \ - struct kvm_assigned_msix_nr) -#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \ - struct kvm_assigned_msix_entry) -#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd) #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) @@ -1527,9 +1482,6 @@ struct kvm_s390_ucas_mapping { * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */ #define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2) #define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3) -/* Available with KVM_CAP_PCI_2_3 */ -#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \ - struct kvm_assigned_pci_dev) /* Available with KVM_CAP_SIGNAL_MSI */ #define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi) /* Available with KVM_CAP_PPC_GET_SMMU_INFO */ @@ -1562,6 +1514,7 @@ struct kvm_s390_ucas_mapping { #define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) /* Available 
with KVM_CAP_COUNTER_OFFSET */
 #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
+#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range)
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE	  _IOWR(KVMIO,  0xe0, struct kvm_create_device)
@@ -1581,8 +1534,6 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_SREGS             _IOW(KVMIO,  0x84, struct kvm_sregs)
 #define KVM_TRANSLATE             _IOWR(KVMIO, 0x85, struct kvm_translation)
 #define KVM_INTERRUPT             _IOW(KVMIO,  0x86, struct kvm_interrupt)
-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
-#define KVM_DEBUG_GUEST           __KVM_DEPRECATED_VCPU_W_0x87
 #define KVM_GET_MSRS              _IOWR(KVMIO, 0x88, struct kvm_msrs)
 #define KVM_SET_MSRS              _IOW(KVMIO,  0x89, struct kvm_msrs)
 #define KVM_SET_CPUID             _IOW(KVMIO,  0x8a, struct kvm_cpuid)
@@ -2256,4 +2207,24 @@ struct kvm_s390_zpci_op {
 /* flags for kvm_s390_zpci_op->u.reg_aen.flags */
 #define KVM_S390_ZPCIOP_REGAEN_HOST    (1 << 0)
 
+/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
+#define KVM_SET_MEMORY_ATTRIBUTES              _IOW(KVMIO,  0xd2, struct kvm_memory_attributes)
+
+struct kvm_memory_attributes {
+	__u64 address;
+	__u64 size;
+	__u64 attributes;
+	__u64 flags;
+};
+
+#define KVM_MEMORY_ATTRIBUTE_PRIVATE           (1ULL << 3)
+
+#define KVM_CREATE_GUEST_MEMFD	_IOWR(KVMIO,  0xd4, struct kvm_create_guest_memfd)
+
+struct kvm_create_guest_memfd {
+	__u64 size;
+	__u64 flags;
+	__u64 reserved[6];
+};
+
 #endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 81d09ef9aa50..25c8d7677539 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -31,6 +31,12 @@ struct landlock_ruleset_attr {
	 * this access right.
	 */
	__u64 handled_access_fs;
+	/**
+	 * @handled_access_net: Bitmask of actions (cf. `Network flags`_)
+	 * that are handled by this ruleset and should then be forbidden if no
+	 * rule explicitly allows them.
+	 */
+	__u64 handled_access_net;
 };
 
 /*
@@ -54,6 +60,11 @@ enum landlock_rule_type {
	 * landlock_path_beneath_attr .
	 */
	LANDLOCK_RULE_PATH_BENEATH = 1,
+	/**
+	 * @LANDLOCK_RULE_NET_PORT: Type of a &struct
+	 * landlock_net_port_attr .
+	 */
+	LANDLOCK_RULE_NET_PORT,
 };
 
 /**
@@ -80,6 +91,31 @@ struct landlock_path_beneath_attr {
 } __attribute__((packed));
 
 /**
+ * struct landlock_net_port_attr - Network port definition
+ *
+ * Argument of sys_landlock_add_rule().
+ */
+struct landlock_net_port_attr {
+	/**
+	 * @allowed_access: Bitmask of allowed network actions for a port
+	 * (cf. `Network flags`_).
+	 */
+	__u64 allowed_access;
+	/**
+	 * @port: Network port in host endianness.
+	 *
+	 * It should be noted that port 0 passed to :manpage:`bind(2)` will
+	 * bind to an available port from a specific port range. This can be
+	 * configured via the ``/proc/sys/net/ipv4/ip_local_port_range``
+	 * sysctl (also used for IPv6). A Landlock rule with port 0 and the
+	 * ``LANDLOCK_ACCESS_NET_BIND_TCP`` right means that requesting to bind
+	 * on port 0 is allowed and it will automatically translate to binding
+	 * on the related port range.
+	 */
+	__u64 port;
+};
+
+/**
  * DOC: fs_access
  *
  * A set of actions on kernel objects may be defined by an attribute (e.g.
@@ -189,4 +225,23 @@ struct landlock_path_beneath_attr {
 #define LANDLOCK_ACCESS_FS_TRUNCATE		(1ULL << 14)
 /* clang-format on */
 
+/**
+ * DOC: net_access
+ *
+ * Network flags
+ * ~~~~~~~~~~~~~~~~
+ *
+ * These flags make it possible to restrict a sandboxed process to a set of
+ * network actions. This is supported since the Landlock ABI version 4.
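+ *
+ * A minimal sketch, assuming the landlock_create_ruleset() and
+ * landlock_add_rule() syscalls are reached through local syscall(2)
+ * wrappers: handle both TCP actions (the flags listed just below) while
+ * only allowing binds to port 443:
+ *
+ *	struct landlock_ruleset_attr ruleset_attr = {
+ *		.handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
+ *				      LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ *	};
+ *	struct landlock_net_port_attr net_port = {
+ *		.allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
+ *		.port = 443,
+ *	};
+ *	int ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+ *						 sizeof(ruleset_attr), 0);
+ *
+ *	landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT, &net_port, 0);
+ *
+ * Enforcement additionally requires prctl(PR_SET_NO_NEW_PRIVS, 1, ...)
+ * followed by landlock_restrict_self(2).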
+ *
+ * TCP sockets with allowed actions:
+ *
+ * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port.
+ * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to
+ *   a remote port.
+ */
+/* clang-format off */
+#define LANDLOCK_ACCESS_NET_BIND_TCP	(1ULL << 0)
+#define LANDLOCK_ACCESS_NET_CONNECT_TCP	(1ULL << 1)
+/* clang-format on */
 
 #endif /* _UAPI_LINUX_LANDLOCK_H */
diff --git a/include/uapi/linux/lsm.h b/include/uapi/linux/lsm.h
new file mode 100644
index 000000000000..f8aef9ade549
--- /dev/null
+++ b/include/uapi/linux/lsm.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Linux Security Modules (LSM) - User space API
+ *
+ * Copyright (C) 2022 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef _UAPI_LINUX_LSM_H
+#define _UAPI_LINUX_LSM_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+/**
+ * struct lsm_ctx - LSM context information
+ * @id: the LSM id number, see LSM_ID_XXX
+ * @flags: LSM specific flags
+ * @len: length of the lsm_ctx struct, @ctx and any other data or padding
+ * @ctx_len: the size of @ctx
+ * @ctx: the LSM context value
+ *
+ * The @len field MUST be equal to the size of the lsm_ctx struct
+ * plus any additional padding and/or data placed after @ctx.
+ *
+ * In all cases @ctx_len MUST be equal to the length of @ctx.
+ * If @ctx is a string value it should be nul-terminated with
+ * @ctx_len equal to `strlen(@ctx) + 1`. Binary values are
+ * supported.
+ *
+ * The @flags and @ctx fields SHOULD only be interpreted by the
+ * LSM specified by @id; they MUST be set to zero/0 when not used.
+ */
+struct lsm_ctx {
+	__u64 id;
+	__u64 flags;
+	__u64 len;
+	__u64 ctx_len;
+	__u8 ctx[] __counted_by(ctx_len);
+};
+
+/*
+ * ID tokens to identify Linux Security Modules (LSMs)
+ *
+ * These token values are used to uniquely identify specific LSMs
+ * in the kernel as well as in the kernel's LSM userspace API.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ID_UNDEF		0
+#define LSM_ID_CAPABILITY	100
+#define LSM_ID_SELINUX		101
+#define LSM_ID_SMACK		102
+#define LSM_ID_TOMOYO		103
+#define LSM_ID_APPARMOR		104
+#define LSM_ID_YAMA		105
+#define LSM_ID_LOADPIN		106
+#define LSM_ID_SAFESETID	107
+#define LSM_ID_LOCKDOWN		108
+#define LSM_ID_BPF		109
+#define LSM_ID_LANDLOCK		110
+
+/*
+ * LSM_ATTR_XXX definitions identify different LSM attributes
+ * which are used in the kernel's LSM userspace API. Support
+ * for these attributes varies across the different LSMs. None
+ * are required.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ATTR_UNDEF		0
+#define LSM_ATTR_CURRENT	100
+#define LSM_ATTR_EXEC		101
+#define LSM_ATTR_FSCREATE	102
+#define LSM_ATTR_KEYCREATE	103
+#define LSM_ATTR_PREV		104
+#define LSM_ATTR_SOCKCREATE	105
+
+/*
+ * LSM_FLAG_XXX definitions identify special handling instructions
+ * for the API.
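+ *
+ * Without LSM_FLAG_SINGLE, an attribute query such as lsm_get_self_attr(2)
+ * may return one struct lsm_ctx record per active LSM. A walking sketch
+ * (assuming @buf was filled in by such a call and @count records were
+ * reported back):
+ *
+ *	struct lsm_ctx *ctx = (struct lsm_ctx *)buf;
+ *
+ *	while (count--) {
+ *		/* ctx->id names the LSM; ctx->ctx holds ctx_len bytes */
+ *		ctx = (struct lsm_ctx *)((__u8 *)ctx + ctx->len);
+ *	}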
+ */ +#define LSM_FLAG_SINGLE 0x0001 + +#endif /* _UAPI_LINUX_LSM_H */ diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h index a03c543cb072..f05f747e444d 100644 --- a/include/uapi/linux/media-bus-format.h +++ b/include/uapi/linux/media-bus-format.h @@ -34,7 +34,7 @@ #define MEDIA_BUS_FMT_FIXED 0x0001 -/* RGB - next is 0x1025 */ +/* RGB - next is 0x1026 */ #define MEDIA_BUS_FMT_RGB444_1X12 0x1016 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 @@ -46,6 +46,7 @@ #define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007 #define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008 #define MEDIA_BUS_FMT_RGB666_1X18 0x1009 +#define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025 #define MEDIA_BUS_FMT_BGR666_1X18 0x1023 #define MEDIA_BUS_FMT_RBG888_1X24 0x100e #define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015 diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h index 6e57743628c0..68a0272e99b7 100644 --- a/include/uapi/linux/mei.h +++ b/include/uapi/linux/mei.h @@ -14,8 +14,8 @@ * FW Client (given by UUID). This opens a communication channel * between a host client and a FW client. From this point every read and write * will communicate with the associated FW client. - * Only in close() (file_operation release()) the communication between - * the clients is disconnected + * Only in close() (file_operation release()) is the communication between + * the clients disconnected. * * The IOCTL argument is a struct with a union that contains * the input parameter and the output parameter for this IOCTL. @@ -51,7 +51,7 @@ struct mei_connect_client_data { * DOC: set and unset event notification for a connected client * * The IOCTL argument is 1 for enabling event notification and 0 for - * disabling the service + * disabling the service. * Return: -EOPNOTSUPP if the devices doesn't support the feature */ #define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32) @@ -59,8 +59,8 @@ struct mei_connect_client_data { /** * DOC: retrieve notification * - * The IOCTL output argument is 1 if an event was is pending and 0 otherwise - * the ioctl has to be called in order to acknowledge pending event + * The IOCTL output argument is 1 if an event was pending and 0 otherwise. + * The ioctl has to be called in order to acknowledge pending event. * * Return: -EOPNOTSUPP if the devices doesn't support the feature */ @@ -98,16 +98,16 @@ struct mei_connect_client_data_vtag { * FW Client (given by UUID), and virtual tag (vtag). * The IOCTL opens a communication channel between a host client and * a FW client on a tagged channel. From this point on, every read - * and write will communicate with the associated FW client with + * and write will communicate with the associated FW client * on the tagged channel. - * Upone close() the communication is terminated. + * Upon close() the communication is terminated. * * The IOCTL argument is a struct with a union that contains * the input parameter and the output parameter for this IOCTL. * - * The input parameter is UUID of the FW Client, a vtag [0,255] + * The input parameter is UUID of the FW Client, a vtag [0,255]. * The output parameter is the properties of the FW client - * (FW protocool version and max message size). + * (FW protocol version and max message size). * * Clients that do not support tagged connection * will respond with -EOPNOTSUPP. 
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 046d0ccba4cd..a8963f7ef4c2 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -48,7 +48,7 @@ enum {
 #define MPOL_MF_MOVE	 (1<<1)	/* Move pages owned by this process to conform to policy */
 #define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to policy */
-#define MPOL_MF_LAZY	 (1<<3)	/* Modifies '_MOVE:  lazy migrate on fault */
+#define MPOL_MF_LAZY	 (1<<3)	/* UNSUPPORTED FLAG: Lazy migrate on fault */
 #define MPOL_MF_INTERNAL (1<<4)	/* Internal flags start here */
 
 #define MPOL_MF_VALID	(MPOL_MF_STRICT   |	\
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index bb242fdcfe6b..ad5478dbad00 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -138,4 +138,74 @@ struct mount_attr {
 /* List of all mount_attr versions. */
 #define MOUNT_ATTR_SIZE_VER0	32 /* sizeof first published struct */
 
+
+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount(). The kernel will set the
+ * @mask field according to the supported fields.
+ *
+ * If string fields are selected, then the caller needs to pass a buffer that
+ * has space after the fixed part of the structure. Nul-terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, then EOVERFLOW is returned. The size actually
+ * used is returned in @size.
+ */
+struct statmount {
+	__u32 size;		/* Total size, including strings */
+	__u32 __spare1;
+	__u64 mask;		/* What results were written */
+	__u32 sb_dev_major;	/* Device ID */
+	__u32 sb_dev_minor;
+	__u64 sb_magic;		/* ..._SUPER_MAGIC */
+	__u32 sb_flags;		/* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+	__u32 fs_type;		/* [str] Filesystem type */
+	__u64 mnt_id;		/* Unique ID of mount */
+	__u64 mnt_parent_id;	/* Unique ID of parent (for root == mnt_id) */
+	__u32 mnt_id_old;	/* Reused IDs used in proc/.../mountinfo */
+	__u32 mnt_parent_id_old;
+	__u64 mnt_attr;		/* MOUNT_ATTR_... */
+	__u64 mnt_propagation;	/* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+	__u64 mnt_peer_group;	/* ID of shared peer group */
+	__u64 mnt_master;	/* Mount receives propagation from this ID */
+	__u64 propagate_from;	/* Propagation from in current namespace */
+	__u32 mnt_root;		/* [str] Root of mount relative to root of fs */
+	__u32 mnt_point;	/* [str] Mountpoint relative to current root */
+	__u64 __spare2[50];
+	char str[];		/* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+	__u32 size;
+	__u32 spare;
+	__u64 mnt_id;
+	__u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0	24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC		0x00000001U	/* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC		0x00000002U	/* Want/got mnt_...
*/ +#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */ +#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */ +#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */ +#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */ + +/* + * Special @mnt_id values that can be passed to listmount + */ +#define LSMT_ROOT 0xffffffffffffffff /* root mount */ + #endif /* _UAPI_LINUX_MOUNT_H */ diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index ee9c49f949a2..74cfe496891e 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -23,91 +23,20 @@ #define MPTCP_SUBFLOW_FLAG_CONNECTED _BITUL(7) #define MPTCP_SUBFLOW_FLAG_MAPVALID _BITUL(8) -enum { - MPTCP_SUBFLOW_ATTR_UNSPEC, - MPTCP_SUBFLOW_ATTR_TOKEN_REM, - MPTCP_SUBFLOW_ATTR_TOKEN_LOC, - MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ, - MPTCP_SUBFLOW_ATTR_MAP_SEQ, - MPTCP_SUBFLOW_ATTR_MAP_SFSEQ, - MPTCP_SUBFLOW_ATTR_SSN_OFFSET, - MPTCP_SUBFLOW_ATTR_MAP_DATALEN, - MPTCP_SUBFLOW_ATTR_FLAGS, - MPTCP_SUBFLOW_ATTR_ID_REM, - MPTCP_SUBFLOW_ATTR_ID_LOC, - MPTCP_SUBFLOW_ATTR_PAD, - __MPTCP_SUBFLOW_ATTR_MAX -}; - -#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1) - -/* netlink interface */ -#define MPTCP_PM_NAME "mptcp_pm" #define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds" #define MPTCP_PM_EV_GRP_NAME "mptcp_pm_events" -#define MPTCP_PM_VER 0x1 - -/* - * ATTR types defined for MPTCP - */ -enum { - MPTCP_PM_ATTR_UNSPEC, - - MPTCP_PM_ATTR_ADDR, /* nested address */ - MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */ - MPTCP_PM_ATTR_SUBFLOWS, /* u32 */ - MPTCP_PM_ATTR_TOKEN, /* u32 */ - MPTCP_PM_ATTR_LOC_ID, /* u8 */ - MPTCP_PM_ATTR_ADDR_REMOTE, /* nested address */ - - __MPTCP_PM_ATTR_MAX -}; - -#define MPTCP_PM_ATTR_MAX (__MPTCP_PM_ATTR_MAX - 1) - -enum { - MPTCP_PM_ADDR_ATTR_UNSPEC, - - MPTCP_PM_ADDR_ATTR_FAMILY, /* u16 */ - MPTCP_PM_ADDR_ATTR_ID, /* u8 */ - MPTCP_PM_ADDR_ATTR_ADDR4, /* struct in_addr */ - MPTCP_PM_ADDR_ATTR_ADDR6, /* struct in6_addr */ - MPTCP_PM_ADDR_ATTR_PORT, /* u16 */ - MPTCP_PM_ADDR_ATTR_FLAGS, /* u32 */ - MPTCP_PM_ADDR_ATTR_IF_IDX, /* s32 */ - - __MPTCP_PM_ADDR_ATTR_MAX -}; - -#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1) - -#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) -#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) -#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) -#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3) -#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4) - -enum { - MPTCP_PM_CMD_UNSPEC, - MPTCP_PM_CMD_ADD_ADDR, - MPTCP_PM_CMD_DEL_ADDR, - MPTCP_PM_CMD_GET_ADDR, - MPTCP_PM_CMD_FLUSH_ADDRS, - MPTCP_PM_CMD_SET_LIMITS, - MPTCP_PM_CMD_GET_LIMITS, - MPTCP_PM_CMD_SET_FLAGS, - MPTCP_PM_CMD_ANNOUNCE, - MPTCP_PM_CMD_REMOVE, - MPTCP_PM_CMD_SUBFLOW_CREATE, - MPTCP_PM_CMD_SUBFLOW_DESTROY, - - __MPTCP_PM_CMD_AFTER_LAST -}; +#include <linux/mptcp_pm.h> #define MPTCP_INFO_FLAG_FALLBACK _BITUL(0) #define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1) +#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) +#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) +#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) +#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3) +#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4) + struct mptcp_info { __u8 mptcpi_subflows; __u8 mptcpi_add_addr_signal; @@ -128,95 +57,9 @@ struct mptcp_info { __u64 mptcpi_bytes_sent; __u64 mptcpi_bytes_received; __u64 mptcpi_bytes_acked; + __u8 mptcpi_subflows_total; }; -/* - * MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6, - * sport, dport - * A new MPTCP connection has been created. 
It is the good time to allocate
- * memory and send ADD_ADDR if needed. Depending on the traffic-patterns
- * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent.
- *
- * MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- *                          sport, dport
- * A MPTCP connection is established (can start new subflows).
- *
- * MPTCP_EVENT_CLOSED: token
- * A MPTCP connection has stopped.
- *
- * MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport]
- * A new address has been announced by the peer.
- *
- * MPTCP_EVENT_REMOVED: token, rem_id
- * An address has been lost by the peer.
- *
- * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id,
- *                              saddr4 | saddr6, daddr4 | daddr6, sport,
- *                              dport, backup, if_idx [, error]
- * A new subflow has been established. 'error' should not be set.
- *
- * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
- *                         daddr4 | daddr6, sport, dport, backup, if_idx
- *                         [, error]
- * A subflow has been closed. An error (copy of sk_err) could be set if an
- * error has been detected for this subflow.
- *
- * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
- *                           daddr4 | daddr6, sport, dport, backup, if_idx
- *                           [, error]
- * The priority of a subflow has changed. 'error' should not be set.
- *
- * MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6
- * A new PM listener is created.
- *
- * MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6
- * A PM listener is closed.
- */
-enum mptcp_event_type {
-	MPTCP_EVENT_UNSPEC = 0,
-	MPTCP_EVENT_CREATED = 1,
-	MPTCP_EVENT_ESTABLISHED = 2,
-	MPTCP_EVENT_CLOSED = 3,
-
-	MPTCP_EVENT_ANNOUNCED = 6,
-	MPTCP_EVENT_REMOVED = 7,
-
-	MPTCP_EVENT_SUB_ESTABLISHED = 10,
-	MPTCP_EVENT_SUB_CLOSED = 11,
-
-	MPTCP_EVENT_SUB_PRIORITY = 13,
-
-	MPTCP_EVENT_LISTENER_CREATED = 15,
-	MPTCP_EVENT_LISTENER_CLOSED = 16,
-};
-
-enum mptcp_event_attr {
-	MPTCP_ATTR_UNSPEC = 0,
-
-	MPTCP_ATTR_TOKEN,	/* u32 */
-	MPTCP_ATTR_FAMILY,	/* u16 */
-	MPTCP_ATTR_LOC_ID,	/* u8 */
-	MPTCP_ATTR_REM_ID,	/* u8 */
-	MPTCP_ATTR_SADDR4,	/* be32 */
-	MPTCP_ATTR_SADDR6,	/* struct in6_addr */
-	MPTCP_ATTR_DADDR4,	/* be32 */
-	MPTCP_ATTR_DADDR6,	/* struct in6_addr */
-	MPTCP_ATTR_SPORT,	/* be16 */
-	MPTCP_ATTR_DPORT,	/* be16 */
-	MPTCP_ATTR_BACKUP,	/* u8 */
-	MPTCP_ATTR_ERROR,	/* u8 */
-	MPTCP_ATTR_FLAGS,	/* u16 */
-	MPTCP_ATTR_TIMEOUT,	/* u32 */
-	MPTCP_ATTR_IF_IDX,	/* s32 */
-	MPTCP_ATTR_RESET_REASON,/* u32 */
-	MPTCP_ATTR_RESET_FLAGS, /* u32 */
-	MPTCP_ATTR_SERVER_SIDE,	/* u8 */
-
-	__MPTCP_ATTR_AFTER_LAST
-};
-
-#define MPTCP_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1)
-
 /* MPTCP Reset reason codes, rfc8684 */
 #define MPTCP_RST_EUNSPEC	0
 #define MPTCP_RST_EMPTCP	1
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
new file mode 100644
index 000000000000..50589e5dd6a3
--- /dev/null
+++ b/include/uapi/linux/mptcp_pm.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/*	Documentation/netlink/specs/mptcp_pm.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_MPTCP_PM_H
+#define _UAPI_LINUX_MPTCP_PM_H
+
+#define MPTCP_PM_NAME	"mptcp_pm"
+#define MPTCP_PM_VER	1
+
+/**
+ * enum mptcp_event_type
+ * @MPTCP_EVENT_UNSPEC: unused event
+ * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ *   sport, dport A new MPTCP connection has been created. It is a good time
+ *   to allocate memory and send ADD_ADDR if needed. Depending on the
+ *   traffic patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
+ *   is sent.
+ * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ *   sport, dport An MPTCP connection is established (can start new subflows).
+ * @MPTCP_EVENT_CLOSED: token An MPTCP connection has stopped.
+ * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A
+ *   new address has been announced by the peer.
+ * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer.
+ * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 |
+ *   saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new
+ *   subflow has been established. 'error' should not be set.
+ * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ *   daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been
+ *   closed. An error (copy of sk_err) could be set if an error has been
+ *   detected for this subflow.
+ * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ *   daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a
+ *   subflow has changed. 'error' should not be set.
+ * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM
+ *   listener is created.
+ * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener
+ *   is closed.
+ */
+enum mptcp_event_type {
+	MPTCP_EVENT_UNSPEC,
+	MPTCP_EVENT_CREATED,
+	MPTCP_EVENT_ESTABLISHED,
+	MPTCP_EVENT_CLOSED,
+	MPTCP_EVENT_ANNOUNCED = 6,
+	MPTCP_EVENT_REMOVED,
+	MPTCP_EVENT_SUB_ESTABLISHED = 10,
+	MPTCP_EVENT_SUB_CLOSED,
+	MPTCP_EVENT_SUB_PRIORITY = 13,
+	MPTCP_EVENT_LISTENER_CREATED = 15,
+	MPTCP_EVENT_LISTENER_CLOSED,
+};
+
+enum {
+	MPTCP_PM_ADDR_ATTR_UNSPEC,
+	MPTCP_PM_ADDR_ATTR_FAMILY,
+	MPTCP_PM_ADDR_ATTR_ID,
+	MPTCP_PM_ADDR_ATTR_ADDR4,
+	MPTCP_PM_ADDR_ATTR_ADDR6,
+	MPTCP_PM_ADDR_ATTR_PORT,
+	MPTCP_PM_ADDR_ATTR_FLAGS,
+	MPTCP_PM_ADDR_ATTR_IF_IDX,
+
+	__MPTCP_PM_ADDR_ATTR_MAX
+};
+#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
+
+enum {
+	MPTCP_SUBFLOW_ATTR_UNSPEC,
+	MPTCP_SUBFLOW_ATTR_TOKEN_REM,
+	MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
+	MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
+	MPTCP_SUBFLOW_ATTR_MAP_SEQ,
+	MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
+	MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
+	MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
+	MPTCP_SUBFLOW_ATTR_FLAGS,
+	MPTCP_SUBFLOW_ATTR_ID_REM,
+	MPTCP_SUBFLOW_ATTR_ID_LOC,
+	MPTCP_SUBFLOW_ATTR_PAD,
+
+	__MPTCP_SUBFLOW_ATTR_MAX
+};
+#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
+
+enum {
+	MPTCP_PM_ENDPOINT_ADDR = 1,
+
+	__MPTCP_PM_ENDPOINT_MAX
+};
+#define MPTCP_PM_ENDPOINT_MAX (__MPTCP_PM_ENDPOINT_MAX - 1)
+
+enum {
+	MPTCP_PM_ATTR_UNSPEC,
+	MPTCP_PM_ATTR_ADDR,
+	MPTCP_PM_ATTR_RCV_ADD_ADDRS,
+	MPTCP_PM_ATTR_SUBFLOWS,
+	MPTCP_PM_ATTR_TOKEN,
+	MPTCP_PM_ATTR_LOC_ID,
+	MPTCP_PM_ATTR_ADDR_REMOTE,
+
+	__MPTCP_ATTR_AFTER_LAST
+};
+#define MPTCP_PM_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1)
+
+enum mptcp_event_attr {
+	MPTCP_ATTR_UNSPEC,
+	MPTCP_ATTR_TOKEN,
+	MPTCP_ATTR_FAMILY,
+	MPTCP_ATTR_LOC_ID,
+	MPTCP_ATTR_REM_ID,
+	MPTCP_ATTR_SADDR4,
+	MPTCP_ATTR_SADDR6,
+	MPTCP_ATTR_DADDR4,
+	MPTCP_ATTR_DADDR6,
+	MPTCP_ATTR_SPORT,
+	MPTCP_ATTR_DPORT,
+	MPTCP_ATTR_BACKUP,
+	MPTCP_ATTR_ERROR,
+	MPTCP_ATTR_FLAGS,
+	MPTCP_ATTR_TIMEOUT,
+	MPTCP_ATTR_IF_IDX,
+	MPTCP_ATTR_RESET_REASON,
+	MPTCP_ATTR_RESET_FLAGS,
+	MPTCP_ATTR_SERVER_SIDE,
+
+	__MPTCP_ATTR_MAX
+};
+#define MPTCP_ATTR_MAX (__MPTCP_ATTR_MAX - 1)
+
+enum {
+	MPTCP_PM_CMD_UNSPEC,
+	MPTCP_PM_CMD_ADD_ADDR,
+	MPTCP_PM_CMD_DEL_ADDR,
+ MPTCP_PM_CMD_GET_ADDR, + MPTCP_PM_CMD_FLUSH_ADDRS, + MPTCP_PM_CMD_SET_LIMITS, + MPTCP_PM_CMD_GET_LIMITS, + MPTCP_PM_CMD_SET_FLAGS, + MPTCP_PM_CMD_ANNOUNCE, + MPTCP_PM_CMD_REMOVE, + MPTCP_PM_CMD_SUBFLOW_CREATE, + MPTCP_PM_CMD_SUBFLOW_DESTROY, + + __MPTCP_PM_CMD_AFTER_LAST +}; +#define MPTCP_PM_CMD_MAX (__MPTCP_PM_CMD_AFTER_LAST - 1) + +#endif /* _UAPI_LINUX_MPTCP_PM_H */ diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index c1634b95c223..93cb411adf72 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -38,26 +38,118 @@ enum netdev_xdp_act { NETDEV_XDP_ACT_MASK = 127, }; +/** + * enum netdev_xdp_rx_metadata + * @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW + * timestamp via bpf_xdp_metadata_rx_timestamp(). + * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet + * hash via bpf_xdp_metadata_rx_hash(). + * @NETDEV_XDP_RX_METADATA_VLAN_TAG: Device is capable of exposing receive + * packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag(). + */ +enum netdev_xdp_rx_metadata { + NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, + NETDEV_XDP_RX_METADATA_HASH = 2, + NETDEV_XDP_RX_METADATA_VLAN_TAG = 4, +}; + +/** + * enum netdev_xsk_flags + * @NETDEV_XSK_FLAGS_TX_TIMESTAMP: HW timestamping egress packets is supported + * by the driver. + * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the + * driver. + */ +enum netdev_xsk_flags { + NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, + NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, +}; + +enum netdev_queue_type { + NETDEV_QUEUE_TYPE_RX, + NETDEV_QUEUE_TYPE_TX, +}; + enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD, NETDEV_A_DEV_XDP_FEATURES, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, + NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, + NETDEV_A_DEV_XSK_FEATURES, __NETDEV_A_DEV_MAX, NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1) }; enum { + NETDEV_A_PAGE_POOL_ID = 1, + NETDEV_A_PAGE_POOL_IFINDEX, + NETDEV_A_PAGE_POOL_NAPI_ID, + NETDEV_A_PAGE_POOL_INFLIGHT, + NETDEV_A_PAGE_POOL_INFLIGHT_MEM, + NETDEV_A_PAGE_POOL_DETACH_TIME, + + __NETDEV_A_PAGE_POOL_MAX, + NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1) +}; + +enum { + NETDEV_A_PAGE_POOL_STATS_INFO = 1, + NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW, + NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER, + NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY, + NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL, + NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL, + NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT, + + __NETDEV_A_PAGE_POOL_STATS_MAX, + NETDEV_A_PAGE_POOL_STATS_MAX = (__NETDEV_A_PAGE_POOL_STATS_MAX - 1) +}; + +enum { + NETDEV_A_NAPI_IFINDEX = 1, + NETDEV_A_NAPI_ID, + NETDEV_A_NAPI_IRQ, + NETDEV_A_NAPI_PID, + + __NETDEV_A_NAPI_MAX, + NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1) +}; + +enum { + NETDEV_A_QUEUE_ID = 1, + NETDEV_A_QUEUE_IFINDEX, + NETDEV_A_QUEUE_TYPE, + NETDEV_A_QUEUE_NAPI_ID, + + __NETDEV_A_QUEUE_MAX, + NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1) +}; + +enum { NETDEV_CMD_DEV_GET = 1, NETDEV_CMD_DEV_ADD_NTF, NETDEV_CMD_DEV_DEL_NTF, NETDEV_CMD_DEV_CHANGE_NTF, + NETDEV_CMD_PAGE_POOL_GET, + NETDEV_CMD_PAGE_POOL_ADD_NTF, + NETDEV_CMD_PAGE_POOL_DEL_NTF, + NETDEV_CMD_PAGE_POOL_CHANGE_NTF, + NETDEV_CMD_PAGE_POOL_STATS_GET, + NETDEV_CMD_QUEUE_GET, + NETDEV_CMD_NAPI_GET, __NETDEV_CMD_MAX, NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1) }; #define 
NETDEV_MCGRP_MGMT "mgmt" +#define NETDEV_MCGRP_PAGE_POOL "page-pool" #endif /* _UAPI_LINUX_NETDEV_H */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 8466c2a9938f..117c6a9b845b 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -263,6 +263,7 @@ enum nft_chain_attributes { * @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN) * @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32) * @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32) + * @NFTA_RULE_CHAIN_ID: add the rule to chain by ID, alternative to @NFTA_RULE_CHAIN (NLA_U32) */ enum nft_rule_attributes { NFTA_RULE_UNSPEC, @@ -284,9 +285,11 @@ enum nft_rule_attributes { /** * enum nft_rule_compat_flags - nf_tables rule compat flags * + * @NFT_RULE_COMPAT_F_UNUSED: unused * @NFT_RULE_COMPAT_F_INV: invert the check result */ enum nft_rule_compat_flags { + NFT_RULE_COMPAT_F_UNUSED = (1 << 0), NFT_RULE_COMPAT_F_INV = (1 << 1), NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV, }; diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index e2ae82e3f9f7..f87aaf28a649 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -298,6 +298,8 @@ struct nla_bitfield32 { * entry has attributes again, the policy for those inner ones * and the corresponding maxtype may be specified. * @NL_ATTR_TYPE_BITFIELD32: &struct nla_bitfield32 attribute + * @NL_ATTR_TYPE_SINT: 32-bit or 64-bit signed attribute, aligned to 4B + * @NL_ATTR_TYPE_UINT: 32-bit or 64-bit unsigned attribute, aligned to 4B */ enum netlink_attribute_type { NL_ATTR_TYPE_INVALID, @@ -322,6 +324,9 @@ enum netlink_attribute_type { NL_ATTR_TYPE_NESTED_ARRAY, NL_ATTR_TYPE_BITFIELD32, + + NL_ATTR_TYPE_SINT, + NL_ATTR_TYPE_UINT, }; /** diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h new file mode 100644 index 000000000000..3cd044edee5d --- /dev/null +++ b/include/uapi/linux/nfsd_netlink.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/nfsd.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_NFSD_NETLINK_H +#define _UAPI_LINUX_NFSD_NETLINK_H + +#define NFSD_FAMILY_NAME "nfsd" +#define NFSD_FAMILY_VERSION 1 + +enum { + NFSD_A_RPC_STATUS_XID = 1, + NFSD_A_RPC_STATUS_FLAGS, + NFSD_A_RPC_STATUS_PROG, + NFSD_A_RPC_STATUS_VERSION, + NFSD_A_RPC_STATUS_PROC, + NFSD_A_RPC_STATUS_SERVICE_TIME, + NFSD_A_RPC_STATUS_PAD, + NFSD_A_RPC_STATUS_SADDR4, + NFSD_A_RPC_STATUS_DADDR4, + NFSD_A_RPC_STATUS_SADDR6, + NFSD_A_RPC_STATUS_DADDR6, + NFSD_A_RPC_STATUS_SPORT, + NFSD_A_RPC_STATUS_DPORT, + NFSD_A_RPC_STATUS_COMPOUND_OPS, + + __NFSD_A_RPC_STATUS_MAX, + NFSD_A_RPC_STATUS_MAX = (__NFSD_A_RPC_STATUS_MAX - 1) +}; + +enum { + NFSD_CMD_RPC_STATUS_GET = 1, + + __NFSD_CMD_MAX, + NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1) +}; + +#endif /* _UAPI_LINUX_NFSD_NETLINK_H */ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 88eb85c63029..1ccdcae24372 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -72,7 +72,7 @@ * For drivers supporting TDLS with external setup (WIPHY_FLAG_SUPPORTS_TDLS * and WIPHY_FLAG_TDLS_EXTERNAL_SETUP), the station lifetime is as follows: * - a setup station entry is added, not yet authorized, without any rate - * or capability information, this just exists to avoid race 
conditions + * or capability information; this just exists to avoid race conditions * - when the TDLS setup is done, a single NL80211_CMD_SET_STATION is valid * to add rate and capability information to the station and at the same * time mark it authorized. @@ -87,7 +87,7 @@ * DOC: Frame transmission/registration support * * Frame transmission and registration support exists to allow userspace - * management entities such as wpa_supplicant react to management frames + * management entities such as wpa_supplicant to react to management frames * that are not being handled by the kernel. This includes, for example, * certain classes of action frames that cannot be handled in the kernel * for various reasons. @@ -113,7 +113,7 @@ * * Frame transmission allows userspace to send for example the required * responses to action frames. It is subject to some sanity checking, - * but many frames can be transmitted. When a frame was transmitted, its + * but many frames can be transmitted. When a frame is transmitted, its * status is indicated to the sending socket. * * For more technical details, see the corresponding command descriptions @@ -123,7 +123,7 @@ /** * DOC: Virtual interface / concurrency capabilities * - * Some devices are able to operate with virtual MACs, they can have + * Some devices are able to operate with virtual MACs; they can have * more than one virtual interface. The capability handling for this * is a bit complex though, as there may be a number of restrictions * on the types of concurrency that are supported. @@ -135,7 +135,7 @@ * Once concurrency is desired, more attributes must be observed: * To start with, since some interface types are purely managed in * software, like the AP-VLAN type in mac80211 for example, there's - * an additional list of these, they can be added at any time and + * an additional list of these; they can be added at any time and * are only restricted by some semantic restrictions (e.g. AP-VLAN * cannot be added without a corresponding AP interface). This list * is exported in the %NL80211_ATTR_SOFTWARE_IFTYPES attribute. @@ -164,17 +164,17 @@ * Packet coalesce feature helps to reduce number of received interrupts * to host by buffering these packets in firmware/hardware for some * predefined time. Received interrupt will be generated when one of the - * following events occur. + * following events occurs. * a) Expiration of hardware timer whose expiration time is set to maximum * coalescing delay of matching coalesce rule. - * b) Coalescing buffer in hardware reaches it's limit. + * b) Coalescing buffer in hardware reaches its limit. * c) Packet doesn't match any of the configured coalesce rules. * * User needs to configure following parameters for creating a coalesce * rule. * a) Maximum coalescing delay * b) List of packet patterns which needs to be matched - * c) Condition for coalescence. pattern 'match' or 'no match' + * c) Condition for coalescence: pattern 'match' or 'no match' * Multiple such rules can be created. */ @@ -213,7 +213,7 @@ /** * DOC: FILS shared key authentication offload * - * FILS shared key authentication offload can be advertized by drivers by + * FILS shared key authentication offload can be advertised by drivers by * setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. 
The drivers that support
 * FILS shared key authentication offload should be able to construct the
 * authentication and association frames for FILS shared key authentication and
@@ -239,7 +239,7 @@
 * The PMKSA can be maintained in userspace persistently so that it can be used
 * later after reboots or wifi turn off/on also.
 *
- * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertized by a FILS
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a FILS
 * capable AP supporting PMK caching. It specifies the scope within which the
 * PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
 * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
@@ -290,12 +290,12 @@
 * If the configuration needs to be applied for specific peer then the MAC
 * address of the peer needs to be passed in %NL80211_ATTR_MAC, otherwise the
 * configuration will be applied for all the connected peers in the vif except
- * any peers that have peer specific configuration for the TID by default; if
- * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer specific values
+ * any peers that have peer-specific configuration for the TID by default; if
+ * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer-specific values
 * will be overwritten.
 *
- * All this configuration is valid only for STA's current connection
- * i.e. the configuration will be reset to default when the STA connects back
+ * All this configuration is valid only for STA's current connection,
+ * i.e., the configuration will be reset to default when the STA connects back
 * after disconnection/roaming, and this configuration will be cleared when
 * the interface goes down.
 */
@@ -326,7 +326,7 @@
/**
 * DOC: Multi-Link Operation
 *
- * In Multi-Link Operation, a connection between to MLDs utilizes multiple
+ * In Multi-Link Operation, a connection between two MLDs utilizes multiple
 * links. To use this in nl80211, various commands and responses now need
 * to or will include the new %NL80211_ATTR_MLO_LINKS attribute.
 * Additionally, various commands that need to operate on a specific link
@@ -335,6 +335,15 @@
 */

/**
+ * DOC: OWE DH IE handling offload
+ *
+ * By setting @NL80211_EXT_FEATURE_OWE_OFFLOAD flag, drivers can indicate
+ * that kernel/application space can avoid DH IE handling. When this flag is
+ * advertised, the driver/device will take care of DH IE inclusion and
+ * processing of the peer DH IE to generate the PMK.
+ */
+
+/**
 * enum nl80211_commands - supported nl80211 commands
 *
 * @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -512,7 +521,7 @@
 *	%NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is
 *	not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified,
 *	scheduled scan will run in an infinite loop with the specified interval.
- *	These attributes are mutually exculsive,
+ *	These attributes are mutually exclusive,
 *	i.e. NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if
 *	NL80211_ATTR_SCHED_SCAN_PLANS is defined.
 *	If for some reason scheduled scan is aborted by the driver, all scan
@@ -543,7 +552,7 @@
 *	%NL80211_CMD_STOP_SCHED_SCAN command is received or when the interface
 *	is brought down while a scheduled scan was running.
 *
- * @NL80211_CMD_GET_SURVEY: get survey resuls, e.g. channel occupation
+ * @NL80211_CMD_GET_SURVEY: get survey results, e.g. channel occupation
 *	or noise level
 * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
 *	NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
@@ -554,12 +563,13 @@
 *	using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
 *	%NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
 *	authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
- *	advertized by a FILS capable AP identifying the scope of PMKSA in an
+ *	advertised by a FILS capable AP identifying the scope of PMKSA in an
 *	ESS.
 * @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
 *	(for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
 *	%NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
- *	authentication.
+ *	authentication. Additionally, in case of SAE offload and OWE offload,
+ *	a PMKSA entry can be deleted using %NL80211_ATTR_SSID.
 * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
 *
 * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -598,7 +608,7 @@
 *	BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify
 *	the SSID (mainly for association, but is included in authentication
 *	request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ +
- *	%NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequence of the
+ *	%NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequency of the
 *	channel in MHz. %NL80211_ATTR_AUTH_TYPE is used to specify the
 *	authentication type. %NL80211_ATTR_IE is used to define IEs
 *	(VendorSpecificInfo, but also including RSN IE and FT IEs) to be added
@@ -807,7 +817,7 @@
 *	reached.
 * @NL80211_CMD_SET_CHANNEL: Set the channel (using %NL80211_ATTR_WIPHY_FREQ
 *	and the attributes determining channel width) the given interface
- *	(identifed by %NL80211_ATTR_IFINDEX) shall operate on.
+ *	(identified by %NL80211_ATTR_IFINDEX) shall operate on.
 *	In case multiple channels are supported by the device, the mechanism
 *	with which it switches channels is implementation-defined.
 *	When a monitor interface is given, it can only switch channel while
@@ -879,7 +889,7 @@
 *	inform userspace of the new replay counter.
 *
 * @NL80211_CMD_PMKSA_CANDIDATE: This is used as an event to inform userspace
- *	of PMKSA caching dandidates.
+ *	of PMKSA caching candidates.
 *
 * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
 *	In addition, this can be used as an event to request userspace to take
@@ -915,7 +925,7 @@
 *
 * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface
 *	by sending a null data frame to it and reporting when the frame is
- *	acknowleged. This is used to allow timing out inactive clients. Uses
+ *	acknowledged. This is used to allow timing out inactive clients. Uses
 *	%NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a
 *	direct reply with an %NL80211_ATTR_COOKIE that is later used to match
 *	up the event with the request. The event includes the same data and
@@ -1126,11 +1136,15 @@
 * @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
 *	configured PMK for the authenticator address identified by
 *	%NL80211_ATTR_MAC.
- * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates an 802.1X FT roam was
- *	completed successfully. Drivers that support 4 way handshake offload
- *	should send this event after indicating 802.1X FT assocation with
- *	%NL80211_CMD_ROAM. If the 4 way handshake failed %NL80211_CMD_DISCONNECT
- *	should be indicated instead.
+ * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates port is authorized and + * open for regular data traffic. For STA/P2P-client, this event is sent + * with AP MAC address and for AP/P2P-GO, the event carries the STA/P2P- + * client MAC address. + * Drivers that support 4 way handshake offload should send this event for + * STA/P2P-client after successful 4-way HS or after 802.1X FT following + * NL80211_CMD_CONNECT or NL80211_CMD_ROAM. Drivers using AP/P2P-GO 4-way + * handshake offload should send this event on successful completion of + * 4-way handshake with the peer (STA/P2P-client). * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request * and RX notification. This command is used both as a request to transmit * a control port frame and as a notification that a control port frame @@ -1314,6 +1328,11 @@ * Multi-Link reconfiguration. %NL80211_ATTR_MLO_LINKS is used to provide * information about the removed STA MLD setup links. * + * @NL80211_CMD_SET_TID_TO_LINK_MAPPING: Set the TID to Link Mapping for a + * non-AP MLD station. The %NL80211_ATTR_MLO_TTLM_DLINK and + * %NL80211_ATTR_MLO_TTLM_ULINK attributes are used to specify the + * TID to Link mapping for downlink/uplink traffic. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1569,6 +1588,8 @@ enum nl80211_commands { NL80211_CMD_LINKS_REMOVED, + NL80211_CMD_SET_TID_TO_LINK_MAPPING, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1826,7 +1847,7 @@ enum nl80211_commands { * using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is * to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER * flag. When used with %NL80211_ATTR_CONTROL_PORT_NO_PREAUTH, pre-auth - * frames are not forwared over the control port. + * frames are not forwarded over the control port. * * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. * We recommend using nested, driver-specific attributes within this. @@ -1963,10 +1984,10 @@ enum nl80211_commands { * bit. Depending on which antennas are selected in the bitmap, 802.11n * drivers can derive which chainmasks to use (if all antennas belonging to * a particular chain are disabled this chain should be disabled) and if - * a chain has diversity antennas wether diversity should be used or not. + * a chain has diversity antennas whether diversity should be used or not. * HT capabilities (STBC, TX Beamforming, Antenna selection) can be * derived from the available chains after applying the antenna mask. - * Non-802.11n drivers can derive wether to use diversity or not. + * Non-802.11n drivers can derive whether to use diversity or not. * Drivers may reject configurations or RX/TX mask combinations they cannot * support by returning -EINVAL. * @@ -2536,7 +2557,7 @@ enum nl80211_commands { * from successful FILS authentication and is used with * %NL80211_CMD_CONNECT. * - * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertized by a FILS AP + * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP * identifying the scope of PMKSAs. This is used with * @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA. * @@ -2690,11 +2711,13 @@ enum nl80211_commands { * * @NL80211_ATTR_FILS_DISCOVERY: Optional parameter to configure FILS * discovery. It is a nested attribute, see - * &enum nl80211_fils_discovery_attributes. + * &enum nl80211_fils_discovery_attributes. 
Userspace should pass an empty + * nested attribute to disable this feature and delete the templates. * * @NL80211_ATTR_UNSOL_BCAST_PROBE_RESP: Optional parameter to configure * unsolicited broadcast probe response. It is a nested attribute, see - * &enum nl80211_unsol_bcast_probe_resp_attributes. + * &enum nl80211_unsol_bcast_probe_resp_attributes. Userspace should pass an empty + * nested attribute to disable this feature and delete the templates. * * @NL80211_ATTR_S1G_CAPABILITY: S1G Capability information element (from * association request when used with NL80211_CMD_NEW_STATION) @@ -2815,6 +2838,19 @@ enum nl80211_commands { * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is * disabled. * + * @NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA: Include BSS usage data, i.e. + * include BSSes that can only be used in restricted scenarios and/or + * cannot be used at all. + * + * @NL80211_ATTR_MLO_TTLM_DLINK: Binary attribute specifying the downlink TID to + * link mapping. The length is 8 * sizeof(u16). For each TID the link + * mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element) + * in Draft P802.11be_D4.0. + * @NL80211_ATTR_MLO_TTLM_ULINK: Binary attribute specifying the uplink TID to + * link mapping. The length is 8 * sizeof(u16). For each TID the link + * mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element) + * in Draft P802.11be_D4.0. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3353,6 +3389,11 @@ enum nl80211_attrs { NL80211_ATTR_MLO_LINK_DISABLED, + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA, + + NL80211_ATTR_MLO_TTLM_DLINK, + NL80211_ATTR_MLO_TTLM_ULINK, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4159,7 +4200,7 @@ enum nl80211_wmm_rule { * (100 * dBm). * @NL80211_FREQUENCY_ATTR_DFS_STATE: current state for DFS * (enum nl80211_dfs_state) - * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in miliseconds for how long + * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in milliseconds for how long * this channel is in this DFS state. * @NL80211_FREQUENCY_ATTR_NO_HT40_MINUS: HT40- isn't possible with this * channel as the control channel @@ -4213,6 +4254,16 @@ enum nl80211_wmm_rule { * as the primary or any of the secondary channels isn't possible * @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel * in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_PSD: Power spectral density (in dBm) that + * is allowed on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_DFS_CONCURRENT: Operation on this channel is + * allowed for peer-to-peer or adhoc communication under the control + * of a DFS master which operates on the same channel (FCC-594280 D01 + * Section B.3). Should be used together with %NL80211_RRF_DFS only. 
+ * @NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT: Client connection to VLP AP + * not allowed using this channel + * @NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT: Client connection to AFC AP + * not allowed using this channel * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -4251,6 +4302,10 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_16MHZ, NL80211_FREQUENCY_ATTR_NO_320MHZ, NL80211_FREQUENCY_ATTR_NO_EHT, + NL80211_FREQUENCY_ATTR_PSD, + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT, + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT, + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4351,6 +4406,8 @@ enum nl80211_reg_type { * a given frequency range. The value is in mBm (100 * dBm). * @NL80211_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds. * If not present or 0 default CAC time will be used. + * @NL80211_ATTR_POWER_RULE_PSD: power spectral density (in dBm). + * This could be negative. * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number * currently defined * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use @@ -4368,6 +4425,8 @@ enum nl80211_reg_rule_attr { NL80211_ATTR_DFS_CAC_TIME, + NL80211_ATTR_POWER_RULE_PSD, + /* keep last */ __NL80211_REG_RULE_ATTR_AFTER_LAST, NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1 @@ -4451,6 +4510,13 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_HE: HE operation not allowed * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed * @NL80211_RRF_NO_EHT: EHT operation not allowed + * @NL80211_RRF_PSD: Ruleset has power spectral density value + * @NL80211_RRF_DFS_CONCURRENT: Operation on this channel is allowed for + peer-to-peer or adhoc communication under the control of a DFS master + which operates on the same channel (FCC-594280 D01 Section B.3). + Should be used together with %NL80211_RRF_DFS only. + * @NL80211_RRF_NO_UHB_VLP_CLIENT: Client connection to VLP AP not allowed + * @NL80211_RRF_NO_UHB_AFC_CLIENT: Client connection to AFC AP not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -4471,6 +4537,10 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_HE = 1<<17, NL80211_RRF_NO_320MHZ = 1<<18, NL80211_RRF_NO_EHT = 1<<19, + NL80211_RRF_PSD = 1<<20, + NL80211_RRF_DFS_CONCURRENT = 1<<21, + NL80211_RRF_NO_UHB_VLP_CLIENT = 1<<22, + NL80211_RRF_NO_UHB_AFC_CLIENT = 1<<23, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR @@ -5008,6 +5078,33 @@ enum nl80211_bss_scan_width { }; /** + * enum nl80211_bss_use_for - bitmap indicating possible BSS use + * @NL80211_BSS_USE_FOR_NORMAL: Use this BSS for normal "connection", + * including IBSS/MBSS depending on the type. + * @NL80211_BSS_USE_FOR_MLD_LINK: This BSS can be used as a link in an + * MLO connection. Note that for an MLO connection, all links including + * the assoc link must have this flag set, and the assoc link must + * additionally have %NL80211_BSS_USE_FOR_NORMAL set. + */ +enum nl80211_bss_use_for { + NL80211_BSS_USE_FOR_NORMAL = 1 << 0, + NL80211_BSS_USE_FOR_MLD_LINK = 1 << 1, +}; + +/** + * enum nl80211_bss_cannot_use_reasons - reason(s) connection to a + * BSS isn't possible + * @NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY: NSTR nonprimary links aren't + * supported by the device, and this BSS entry represents one. + * @NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH: STA is not supporting + * the AP power type (SP, VLP, AP) that the AP uses. 
+ */ +enum nl80211_bss_cannot_use_reasons { + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 1 << 0, + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 1 << 1, +}; + +/** * enum nl80211_bss - netlink attributes for a BSS * * @__NL80211_BSS_INVALID: invalid @@ -5038,7 +5135,7 @@ enum nl80211_bss_scan_width { * elements from a Beacon frame (bin); not present if no Beacon frame has * yet been received * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel - * (u32, enum nl80211_bss_scan_width) + * (u32, enum nl80211_bss_scan_width) - No longer used! * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64) * (not present if no beacon frame has been received yet) * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and @@ -5059,6 +5156,14 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_FREQUENCY_OFFSET: frequency offset in KHz * @NL80211_BSS_MLO_LINK_ID: MLO link ID of the BSS (u8). * @NL80211_BSS_MLD_ADDR: MLD address of this BSS if connected to it. + * @NL80211_BSS_USE_FOR: u32 bitmap attribute indicating what the BSS can be + * used for, see &enum nl80211_bss_use_for. + * @NL80211_BSS_CANNOT_USE_REASONS: Indicates the reason that this BSS cannot + * be used for all or some of the possible uses by the device reporting it, + * even though its presence was detected. + * This is a u64 attribute containing a bitmap of values from + * &enum nl80211_cannot_use_reasons, note that the attribute may be missing + * if no reasons are specified. * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -5086,6 +5191,8 @@ enum nl80211_bss { NL80211_BSS_FREQUENCY_OFFSET, NL80211_BSS_MLO_LINK_ID, NL80211_BSS_MLD_ADDR, + NL80211_BSS_USE_FOR, + NL80211_BSS_CANNOT_USE_REASONS, /* keep last */ __NL80211_BSS_AFTER_LAST, @@ -5434,7 +5541,7 @@ enum nl80211_tx_rate_setting { * (%NL80211_TID_CONFIG_ATTR_TIDS, %NL80211_TID_CONFIG_ATTR_OVERRIDE). * @NL80211_TID_CONFIG_ATTR_PEER_SUPP: same as the previous per-vif one, but * per peer instead. - * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribue, if set indicates + * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribute, if set indicates * that the new configuration overrides all previous peer * configurations, otherwise previous peer specific configurations * should be left untouched. @@ -5817,7 +5924,7 @@ enum nl80211_attr_coalesce_rule { /** * enum nl80211_coalesce_condition - coalesce rule conditions - * @NL80211_COALESCE_CONDITION_MATCH: coalaesce Rx packets when patterns + * @NL80211_COALESCE_CONDITION_MATCH: coalesce Rx packets when patterns * in a rule are matched. * @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns * in a rule are not matched. @@ -5916,7 +6023,7 @@ enum nl80211_if_combination_attrs { * enum nl80211_plink_state - state of a mesh peer link finite state machine * * @NL80211_PLINK_LISTEN: initial state, considered the implicit - * state of non existent mesh peer links + * state of non-existent mesh peer links * @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to * this mesh peer * @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received @@ -6209,7 +6316,7 @@ enum nl80211_feature_flags { * request to use RRM (see %NL80211_ATTR_USE_RRM) with * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set * the ASSOC_REQ_USE_RRM flag in the association request even if - * NL80211_FEATURE_QUIET is not advertized. + * NL80211_FEATURE_QUIET is not advertised. 
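A small interpretation sketch for the two bitmaps above, as a scan-result consumer might apply them (the policy shown, rejecting NSTR nonprimary links, is only an example):

#include <stdbool.h>
#include <linux/nl80211.h>
#include <linux/types.h>

static bool bss_usable_as_mlo_link(__u32 use_for, __u64 cannot_use)
{
	/* Must be flagged as usable for an MLO link at all... */
	if (!(use_for & NL80211_BSS_USE_FOR_MLD_LINK))
		return false;
	/* ...and not blocked because the device can't do NSTR nonprimary. */
	return !(cannot_use & NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY);
}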
* @NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER: This device supports MU-MIMO air * sniffer which means that it can be configured to hear packets from * certain groups which can be configured by the @@ -6221,13 +6328,15 @@ enum nl80211_feature_flags { * the BSS that the interface that requested the scan is connected to * (if available). * @NL80211_EXT_FEATURE_BSS_PARENT_TSF: Per BSS, this driver reports the - * time the last beacon/probe was received. The time is the TSF of the - * BSS that the interface that requested the scan is connected to - * (if available). + * time the last beacon/probe was received. For a non-MLO connection, the + * time is the TSF of the BSS that the interface that requested the scan is + * connected to (if available). For an MLO connection, the time is the TSF + * of the BSS corresponding with link ID specified in the scan request (if + * specified). * @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of * channel dwell time. * @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate - * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate. + * configuration (AP/mesh), supporting a legacy (non-HT/VHT) rate. * @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate * configuration (AP/mesh) with HT rates. * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate @@ -6400,6 +6509,17 @@ enum nl80211_feature_flags { * in authentication and deauthentication frames sent to unassociated peer * using @NL80211_CMD_FRAME. * + * @NL80211_EXT_FEATURE_OWE_OFFLOAD: Driver/Device wants to do OWE DH IE + * handling in station mode. + * + * @NL80211_EXT_FEATURE_OWE_OFFLOAD_AP: Driver/Device wants to do OWE DH IE + * handling in AP mode. + * + * @NL80211_EXT_FEATURE_DFS_CONCURRENT: The device supports peer-to-peer or + * ad hoc operation on DFS channels under the control of a concurrent + * DFS master on the same channel as described in FCC-594280 D01 + * (Section B.3). This, for example, allows P2P GO and P2P clients to + * operate on DFS channels as long as there's a concurrent BSS connection. * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -6471,6 +6591,9 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_PUNCT, NL80211_EXT_FEATURE_SECURE_NAN, NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA, + NL80211_EXT_FEATURE_OWE_OFFLOAD, + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP, + NL80211_EXT_FEATURE_DFS_CONCURRENT, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -6555,7 +6678,7 @@ enum nl80211_timeout_reason { * request parameters IE in the probe request * @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses * @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at - * rate of at least 5.5M. In case non OCE AP is discovered in the channel, + * rate of at least 5.5M. In case non-OCE AP is discovered in the channel, * only the first probe req in the channel will be sent in high rate. * @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request * tx deferral (dot11FILSProbeDelay shall be set to 15ms) @@ -6591,7 +6714,7 @@ enum nl80211_timeout_reason { * received on the 2.4/5 GHz channels to actively scan only the 6GHz * channels on which APs are expected to be found. 
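For reference, extended feature flags are reported to userspace as a byte array with one bit per enum nl80211_ext_feature_index value (LSB first), so a checker along these lines (a sketch mirroring what the iw tool does) can test the new OWE/DFS-concurrent bits:

#include <stdbool.h>
#include <linux/nl80211.h>

static bool ext_feature_isset(const unsigned char *ft, int ft_len,
			      enum nl80211_ext_feature_index idx)
{
	if (idx / 8 >= ft_len)
		return false;
	return ft[idx / 8] & (1 << (idx % 8));
}

/* e.g.: ext_feature_isset(data, len, NL80211_EXT_FEATURE_OWE_OFFLOAD) */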
Note that when not set, * the scan logic would scan all 6GHz channels, but since transmission of - * probe requests on non PSC channels is limited, it is highly likely that + * probe requests on non-PSC channels is limited, it is highly likely that * these channels would passively be scanned. Also note that when the flag * is set, in addition to the colocated APs, PSC channels would also be * scanned if the user space has asked for it. @@ -6923,7 +7046,7 @@ enum nl80211_nan_func_term_reason { * The instance ID for the follow up Service Discovery Frame. This is u8. * @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type * is follow up. This is a u8. - * The requestor instance ID for the follow up Service Discovery Frame. + * The requester instance ID for the follow up Service Discovery Frame. * @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the * follow up Service Discovery Frame. This is a binary attribute. * @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a @@ -7313,7 +7436,7 @@ enum nl80211_peer_measurement_attrs { * @NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED: flag attribute indicating if * trigger based ranging measurement is supported * @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating - * if non trigger based ranging measurement is supported + * if non-trigger-based ranging measurement is supported * * @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal * @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number @@ -7367,7 +7490,7 @@ enum nl80211_peer_measurement_ftm_capa { * if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based * ranging will be used. - * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non trigger based + * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non-trigger-based * ranging measurement (flag) * This attribute and %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED are * mutually exclusive. @@ -7445,7 +7568,7 @@ enum nl80211_peer_measurement_ftm_failure_reasons { * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames * transmitted (u32, optional) * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames - * that were acknowleged (u32, optional) + * that were acknowledged (u32, optional) * @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the * busy peer (u32, seconds) * @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent @@ -7606,7 +7729,7 @@ enum nl80211_iftype_akm_attributes { * @NL80211_FILS_DISCOVERY_ATTR_INT_MIN: Minimum packet interval (u32, TU). * Allowed range: 0..10000 (TU = Time Unit) * @NL80211_FILS_DISCOVERY_ATTR_INT_MAX: Maximum packet interval (u32, TU). - * Allowed range: 0..10000 (TU = Time Unit) + * Allowed range: 0..10000 (TU = Time Unit). If set to 0, the feature is disabled. * @NL80211_FILS_DISCOVERY_ATTR_TMPL: Template data for FILS discovery action * frame including the headers. * @@ -7639,7 +7762,8 @@ enum nl80211_fils_discovery_attributes { * * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT: Maximum packet interval (u32, TU). * Allowed range: 0..20 (TU = Time Unit). IEEE P802.11ax/D6.0 - * 26.17.2.3.2 (AP behavior for fast passive scanning). + * 26.17.2.3.2 (AP behavior for fast passive scanning). If set to 0, the feature is + * disabled. * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL: Unsolicited broadcast probe response * frame template (binary). 
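Tying the "disable" notes together, a minimal libnl-3 sketch (message construction elided) of switching FILS discovery off by sending the documented empty nested attribute:

#include <errno.h>
#include <linux/nl80211.h>
#include <netlink/attr.h>
#include <netlink/msg.h>

static int disable_fils_discovery(struct nl_msg *msg)
{
	struct nlattr *nest;

	nest = nla_nest_start(msg, NL80211_ATTR_FILS_DISCOVERY);
	if (!nest)
		return -ENOBUFS;
	/* No members: disables the feature and deletes the templates. */
	nla_nest_end(msg, nest);
	return 0;
}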
* diff --git a/include/uapi/linux/npcm-video.h b/include/uapi/linux/npcm-video.h new file mode 100644 index 000000000000..1d39f6f38c96 --- /dev/null +++ b/include/uapi/linux/npcm-video.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Controls header for NPCM video driver + * + * Copyright (C) 2022 Nuvoton Technologies + */ + +#ifndef _UAPI_LINUX_NPCM_VIDEO_H +#define _UAPI_LINUX_NPCM_VIDEO_H + +#include <linux/v4l2-controls.h> + +/* + * Check Documentation/userspace-api/media/drivers/npcm-video.rst for control + * details. + */ + +/* + * This control is meant to set the mode of NPCM Video Capture/Differentiation + * (VCD) engine. + * + * The VCD engine supports two modes: + * COMPLETE - Capture the next complete frame into memory. + * DIFF - Compare the incoming frame with the frame stored in memory, and + * updates the differentiated frame in memory. + */ +#define V4L2_CID_NPCM_CAPTURE_MODE (V4L2_CID_USER_NPCM_BASE + 0) + +enum v4l2_npcm_capture_mode { + V4L2_NPCM_CAPTURE_MODE_COMPLETE = 0, /* COMPLETE mode */ + V4L2_NPCM_CAPTURE_MODE_DIFF = 1, /* DIFF mode */ +}; + +/* + * This control is meant to get the count of compressed HEXTILE rectangles which + * is relevant to the number of differentiated frames if VCD is in DIFF mode. + * And the count will always be 1 if VCD is in COMPLETE mode. + */ +#define V4L2_CID_NPCM_RECT_COUNT (V4L2_CID_USER_NPCM_BASE + 1) + +#endif /* _UAPI_LINUX_NPCM_VIDEO_H */ diff --git a/include/uapi/linux/nsm.h b/include/uapi/linux/nsm.h new file mode 100644 index 000000000000..e529f232f6c0 --- /dev/null +++ b/include/uapi/linux/nsm.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + */ + +#ifndef __UAPI_LINUX_NSM_H +#define __UAPI_LINUX_NSM_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define NSM_MAGIC 0x0A + +#define NSM_REQUEST_MAX_SIZE 0x1000 +#define NSM_RESPONSE_MAX_SIZE 0x3000 + +struct nsm_iovec { + __u64 addr; /* Virtual address of target buffer */ + __u64 len; /* Length of target buffer */ +}; + +/* Raw NSM message. Only available with CAP_SYS_ADMIN. 
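A minimal calling sketch for the raw NSM message interface being added here; the /dev/nsm node name is an assumption, and real callers must respect NSM_REQUEST_MAX_SIZE/NSM_RESPONSE_MAX_SIZE and run with CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nsm.h>

static int nsm_raw_roundtrip(const void *req, size_t req_len,
			     void *resp, size_t resp_len)
{
	struct nsm_raw raw;
	int fd, ret;

	fd = open("/dev/nsm", O_RDWR);	/* device node name assumed */
	if (fd < 0)
		return -1;
	memset(&raw, 0, sizeof(raw));
	raw.request.addr = (uint64_t)(uintptr_t)req;
	raw.request.len = req_len;
	raw.response.addr = (uint64_t)(uintptr_t)resp;
	raw.response.len = resp_len;
	ret = ioctl(fd, NSM_IOCTL_RAW, &raw);
	close(fd);
	return ret;
}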
*/ +struct nsm_raw { + /* Request from user */ + struct nsm_iovec request; + /* Response to user */ + struct nsm_iovec response; +}; +#define NSM_IOCTL_RAW _IOWR(NSM_MAGIC, 0x0, struct nsm_raw) + +#endif /* __UAPI_LINUX_NSM_H */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index e5f558d96493..a39193213ff2 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -80,6 +80,7 @@ #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 +#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */ #define PCI_BIST 0x0f /* 8 bits */ #define PCI_BIST_CODE_MASK 0x0f /* Return result */ @@ -637,6 +638,7 @@ #define PCI_EXP_RTCAP 0x1e /* Root Capabilities */ #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ #define PCI_EXP_RTSTA 0x20 /* Root Status */ +#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ /* @@ -930,12 +932,13 @@ /* Process Address Space ID */ #define PCI_PASID_CAP 0x04 /* PASID feature register */ -#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */ -#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */ +#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_WIDTH 0x1f00 #define PCI_PASID_CTRL 0x06 /* PASID control register */ -#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */ -#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */ -#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */ +#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */ +#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */ +#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */ #define PCI_EXT_CAP_PASID_SIZEOF 8 /* Single Root I/O Virtualization */ @@ -975,6 +978,8 @@ #define PCI_LTR_VALUE_MASK 0x000003ff #define PCI_LTR_SCALE_MASK 0x00001c00 #define PCI_LTR_SCALE_SHIFT 10 +#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */ +#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */ #define PCI_EXT_CAP_LTR_SIZEOF 8 /* Access Control Service */ @@ -1042,9 +1047,16 @@ #define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */ #define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */ +#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */ #define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */ @@ -1088,6 +1100,8 @@ #define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ 
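The widened PASID masks above make field extraction straightforward; for instance, the new Max PASID Width field (bits 12:8 of the PASID capability register, per the 0x1f00 mask) comes out with a plain shift. A sketch:

#include <linux/pci_regs.h>

static unsigned int pasid_max_width(unsigned int pasid_cap)
{
	/* PCI_PASID_CAP_WIDTH is 0x1f00, i.e. bits 12:8 */
	return (pasid_cap & PCI_PASID_CAP_WIDTH) >> 8;
}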
+#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */ +#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */ /* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */ #define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */ diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h index f9c1af8d141b..94b46b043b53 100644 --- a/include/uapi/linux/pcitest.h +++ b/include/uapi/linux/pcitest.h @@ -11,7 +11,8 @@ #define __UAPI_LINUX_PCITEST_H #define PCITEST_BAR _IO('P', 0x1) -#define PCITEST_LEGACY_IRQ _IO('P', 0x2) +#define PCITEST_INTX_IRQ _IO('P', 0x2) +#define PCITEST_LEGACY_IRQ PCITEST_INTX_IRQ #define PCITEST_MSI _IOW('P', 0x3, int) #define PCITEST_WRITE _IOW('P', 0x4, unsigned long) #define PCITEST_READ _IOW('P', 0x5, unsigned long) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 39c6a250dd1b..3a64499b0f5d 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -235,6 +237,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -982,6 +986,12 @@ enum perf_event_type { * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; + * # + * # The format of the counters is decided by the + * # "branch_counter_nr" and "branch_counter_width", + * # which are defined in the ABI. + * # + * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi @@ -1427,6 +1437,9 @@ struct perf_branch_entry { reserved:31; }; +/* Size of used info bits in struct perf_branch_entry */ +#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 + union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index c7082cc60d21..ea277039f89d 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -99,7 +99,7 @@ enum { * versions. 
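On the perf side, the new branch-counter capture is requested through branch_sample_type. A hedged sketch (hardware/PMU support permitting; parsing the per-entry counters out of the sample records is not shown):

#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

static int open_branch_counter_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
				  PERF_SAMPLE_BRANCH_COUNTERS;
	/* measure the calling thread on any CPU */
	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}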
*/ #define TCA_ACT_GACT 5 -#define TCA_ACT_IPT 6 +#define TCA_ACT_IPT 6 /* obsoleted, can be reused */ #define TCA_ACT_PEDIT 7 #define TCA_ACT_MIRRED 8 #define TCA_ACT_NAT 9 @@ -120,7 +120,7 @@ enum tca_id { TCA_ID_UNSPEC = 0, TCA_ID_POLICE = 1, TCA_ID_GACT = TCA_ACT_GACT, - TCA_ID_IPT = TCA_ACT_IPT, + TCA_ID_IPT = TCA_ACT_IPT, /* Obsoleted, can be reused */ TCA_ID_PEDIT = TCA_ACT_PEDIT, TCA_ID_MIRRED = TCA_ACT_MIRRED, TCA_ID_NAT = TCA_ACT_NAT, @@ -280,37 +280,6 @@ struct tc_u32_pcnt { #define TC_U32_MAXDEPTH 8 - -/* RSVP filter */ - -enum { - TCA_RSVP_UNSPEC, - TCA_RSVP_CLASSID, - TCA_RSVP_DST, - TCA_RSVP_SRC, - TCA_RSVP_PINFO, - TCA_RSVP_POLICE, - TCA_RSVP_ACT, - __TCA_RSVP_MAX -}; - -#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 ) - -struct tc_rsvp_gpi { - __u32 key; - __u32 mask; - int offset; -}; - -struct tc_rsvp_pinfo { - struct tc_rsvp_gpi dpi; - struct tc_rsvp_gpi spi; - __u8 protocol; - __u8 tunnelid; - __u8 tunnelhdr; - __u8 pad; -}; - /* ROUTE filter */ enum { @@ -341,22 +310,6 @@ enum { #define TCA_FW_MAX (__TCA_FW_MAX - 1) -/* TC index filter */ - -enum { - TCA_TCINDEX_UNSPEC, - TCA_TCINDEX_HASH, - TCA_TCINDEX_MASK, - TCA_TCINDEX_SHIFT, - TCA_TCINDEX_FALL_THROUGH, - TCA_TCINDEX_CLASSID, - TCA_TCINDEX_POLICE, - TCA_TCINDEX_ACT, - __TCA_TCINDEX_MAX -}; - -#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1) - /* Flow filter */ enum { diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 3f85ae578056..a3cd0c2dc995 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -477,115 +477,6 @@ enum { #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) - -/* CBQ section */ - -#define TC_CBQ_MAXPRIO 8 -#define TC_CBQ_MAXLEVEL 8 -#define TC_CBQ_DEF_EWMA 5 - -struct tc_cbq_lssopt { - unsigned char change; - unsigned char flags; -#define TCF_CBQ_LSS_BOUNDED 1 -#define TCF_CBQ_LSS_ISOLATED 2 - unsigned char ewma_log; - unsigned char level; -#define TCF_CBQ_LSS_FLAGS 1 -#define TCF_CBQ_LSS_EWMA 2 -#define TCF_CBQ_LSS_MAXIDLE 4 -#define TCF_CBQ_LSS_MINIDLE 8 -#define TCF_CBQ_LSS_OFFTIME 0x10 -#define TCF_CBQ_LSS_AVPKT 0x20 - __u32 maxidle; - __u32 minidle; - __u32 offtime; - __u32 avpkt; -}; - -struct tc_cbq_wrropt { - unsigned char flags; - unsigned char priority; - unsigned char cpriority; - unsigned char __reserved; - __u32 allot; - __u32 weight; -}; - -struct tc_cbq_ovl { - unsigned char strategy; -#define TC_CBQ_OVL_CLASSIC 0 -#define TC_CBQ_OVL_DELAY 1 -#define TC_CBQ_OVL_LOWPRIO 2 -#define TC_CBQ_OVL_DROP 3 -#define TC_CBQ_OVL_RCLASSIC 4 - unsigned char priority2; - __u16 pad; - __u32 penalty; -}; - -struct tc_cbq_police { - unsigned char police; - unsigned char __res1; - unsigned short __res2; -}; - -struct tc_cbq_fopt { - __u32 split; - __u32 defmap; - __u32 defchange; -}; - -struct tc_cbq_xstats { - __u32 borrows; - __u32 overactions; - __s32 avgidle; - __s32 undertime; -}; - -enum { - TCA_CBQ_UNSPEC, - TCA_CBQ_LSSOPT, - TCA_CBQ_WRROPT, - TCA_CBQ_FOPT, - TCA_CBQ_OVL_STRATEGY, - TCA_CBQ_RATE, - TCA_CBQ_RTAB, - TCA_CBQ_POLICE, - __TCA_CBQ_MAX, -}; - -#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) - -/* dsmark section */ - -enum { - TCA_DSMARK_UNSPEC, - TCA_DSMARK_INDICES, - TCA_DSMARK_DEFAULT_INDEX, - TCA_DSMARK_SET_TC_INDEX, - TCA_DSMARK_MASK, - TCA_DSMARK_VALUE, - __TCA_DSMARK_MAX, -}; - -#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) - -/* ATM section */ - -enum { - TCA_ATM_UNSPEC, - TCA_ATM_FD, /* file/socket descriptor */ - TCA_ATM_PTR, /* pointer to descriptor - later */ - TCA_ATM_HDR, /* LL header */ - TCA_ATM_EXCESS, /* excess traffic class (0 for 
CLP) */ - TCA_ATM_ADDR, /* PVC address (for output only) */ - TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ - __TCA_ATM_MAX, -}; - -#define TCA_ATM_MAX (__TCA_ATM_MAX - 1) - /* Network emulator */ enum { @@ -941,15 +832,22 @@ enum { TCA_FQ_HORIZON_DROP, /* drop packets beyond horizon, or cap their EDT */ + TCA_FQ_PRIOMAP, /* prio2band */ + + TCA_FQ_WEIGHTS, /* Weights for each band */ + __TCA_FQ_MAX }; #define TCA_FQ_MAX (__TCA_FQ_MAX - 1) +#define FQ_BANDS 3 +#define FQ_MIN_WEIGHT 16384 + struct tc_fq_qd_stats { __u64 gc_flows; - __u64 highprio_packets; - __u64 tcp_retrans; + __u64 highprio_packets; /* obsolete */ + __u64 tcp_retrans; /* obsolete */ __u64 throttled; __u64 flows_plimit; __u64 pkts_too_long; @@ -962,6 +860,10 @@ struct tc_fq_qd_stats { __u64 ce_mark; /* packets above ce_threshold */ __u64 horizon_drops; __u64 horizon_caps; + __u64 fastpath_packets; + __u64 band_drops[FQ_BANDS]; + __u32 band_pkt_count[FQ_BANDS]; + __u32 pad; }; /* Heavy-Hitter Filter */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 3c36aeade991..370ed14b1ae0 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -283,7 +283,8 @@ struct prctl_mm_map { /* Memory deny write / execute */ #define PR_SET_MDWE 65 -# define PR_MDWE_REFUSE_EXEC_GAIN 1 +# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0) +# define PR_MDWE_NO_INHERIT (1UL << 1) #define PR_GET_MDWE 66 diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 1c9da485318f..b44ba7dcdefc 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -68,6 +68,7 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, + SEV_RET_INVALID_KEY = 0x27, SEV_RET_MAX, } sev_ret_code; diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 05cc35fc94ac..da700999cad4 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -224,6 +224,8 @@ struct ptp_pin_desc { _IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise) #define PTP_SYS_OFFSET_EXTENDED2 \ _IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended) +#define PTP_MASK_CLEAR_ALL _IO(PTP_CLK_MAGIC, 19) +#define PTP_MASK_EN_SINGLE _IOW(PTP_CLK_MAGIC, 20, unsigned int) struct ptp_extts_event { struct ptp_clock_time t; /* Time event occured. */ diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index 6c0aa577730f..5a43c23f53bf 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -2,15 +2,11 @@ /* md_p.h : physical layout of Linux RAID devices Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. - - You should have received a copy of the GNU General Public License - (for example /usr/src/linux/COPYING); if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
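The two new PTP ioctls give userspace per-channel control over external-timestamp delivery. A short sketch (the /dev/ptp0 path is an assumption):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

static int ptp_listen_single_channel(unsigned int chan)
{
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, PTP_MASK_CLEAR_ALL) ||
	    ioctl(fd, PTP_MASK_EN_SINGLE, &chan)) {
		close(fd);
		return -1;
	}
	return fd;	/* events now arrive only for channel `chan` */
}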
*/ #ifndef _MD_P_H @@ -237,7 +233,7 @@ struct mdp_superblock_1 { char set_name[32]; /* set and interpreted by user-space */ __le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/ - __le32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */ + __le32 level; /* 0,1,4,5 */ __le32 layout; /* only for raid5 and raid10 currently */ __le64 size; /* used size of component devices, in 512byte sectors */ diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h index 105307244961..7be89a4906e7 100644 --- a/include/uapi/linux/raid/md_u.h +++ b/include/uapi/linux/raid/md_u.h @@ -2,15 +2,11 @@ /* md_u.h : user <=> kernel API between Linux raidtools and RAID drivers Copyright (C) 1998 Ingo Molnar - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. - - You should have received a copy of the GNU General Public License - (for example /usr/src/linux/COPYING); if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _UAPI_MD_U_H @@ -107,11 +103,6 @@ typedef struct mdu_array_info_s { } mdu_array_info_t; -/* non-obvious values for 'level' */ -#define LEVEL_MULTIPATH (-4) -#define LEVEL_LINEAR (-1) -#define LEVEL_FAULTY (-5) - /* we need a value for 'no level specified' and 0 * means 'raid0', so we need something else. This is * for internal use only diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h index ac5d6a3031db..4fc22908bc09 100644 --- a/include/uapi/linux/resource.h +++ b/include/uapi/linux/resource.h @@ -2,7 +2,7 @@ #ifndef _UAPI_LINUX_RESOURCE_H #define _UAPI_LINUX_RESOURCE_H -#include <linux/time.h> +#include <linux/time_types.h> #include <linux/types.h> /* diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h index 1637e68177d9..f0c8da2b185b 100644 --- a/include/uapi/linux/rpmsg.h +++ b/include/uapi/linux/rpmsg.h @@ -43,4 +43,14 @@ struct rpmsg_endpoint_info { */ #define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info) +/** + * Get the flow control state of the remote rpmsg char device. + */ +#define RPMSG_GET_OUTGOING_FLOWCONTROL _IOR(0xb5, 0x5, int) + +/** + * Set the flow control state of the local rpmsg char device. 
+ */ +#define RPMSG_SET_INCOMING_FLOWCONTROL _IOR(0xb5, 0x6, int) + #endif diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 51c13cf9c5ae..3b687d20c9ed 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -502,13 +502,17 @@ enum { #define RTAX_MAX (__RTAX_MAX - 1) -#define RTAX_FEATURE_ECN (1 << 0) -#define RTAX_FEATURE_SACK (1 << 1) -#define RTAX_FEATURE_TIMESTAMP (1 << 2) -#define RTAX_FEATURE_ALLFRAG (1 << 3) - -#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \ - RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG) +#define RTAX_FEATURE_ECN (1 << 0) +#define RTAX_FEATURE_SACK (1 << 1) /* unused */ +#define RTAX_FEATURE_TIMESTAMP (1 << 2) /* unused */ +#define RTAX_FEATURE_ALLFRAG (1 << 3) /* unused */ +#define RTAX_FEATURE_TCP_USEC_TS (1 << 4) + +#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | \ + RTAX_FEATURE_SACK | \ + RTAX_FEATURE_TIMESTAMP | \ + RTAX_FEATURE_ALLFRAG | \ + RTAX_FEATURE_TCP_USEC_TS) struct rta_session { __u8 proto; diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h index f2c4589d4dbf..90662385689b 100644 --- a/include/uapi/linux/sched/types.h +++ b/include/uapi/linux/sched/types.h @@ -4,10 +4,6 @@ #include <linux/types.h> -struct sched_param { - int sched_priority; -}; - #define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ #define SCHED_ATTR_SIZE_VER1 56 /* add: util_{min,max} */ diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h index 53bc1af67a41..de9b4733607e 100644 --- a/include/uapi/linux/serial.h +++ b/include/uapi/linux/serial.h @@ -11,6 +11,7 @@ #ifndef _UAPI_LINUX_SERIAL_H #define _UAPI_LINUX_SERIAL_H +#include <linux/const.h> #include <linux/types.h> #include <linux/tty_flags.h> @@ -137,17 +138,20 @@ struct serial_icounter_struct { * * %SER_RS485_ADDRB - Enable RS485 addressing mode. * * %SER_RS485_ADDR_RECV - Receive address filter (enables @addr_recv). Requires %SER_RS485_ADDRB. * * %SER_RS485_ADDR_DEST - Destination address (enables @addr_dest). Requires %SER_RS485_ADDRB. + * * %SER_RS485_MODE_RS422 - Enable RS422. Requires %SER_RS485_ENABLED. */ struct serial_rs485 { __u32 flags; -#define SER_RS485_ENABLED (1 << 0) -#define SER_RS485_RTS_ON_SEND (1 << 1) -#define SER_RS485_RTS_AFTER_SEND (1 << 2) -#define SER_RS485_RX_DURING_TX (1 << 4) -#define SER_RS485_TERMINATE_BUS (1 << 5) -#define SER_RS485_ADDRB (1 << 6) -#define SER_RS485_ADDR_RECV (1 << 7) -#define SER_RS485_ADDR_DEST (1 << 8) +#define SER_RS485_ENABLED _BITUL(0) +#define SER_RS485_RTS_ON_SEND _BITUL(1) +#define SER_RS485_RTS_AFTER_SEND _BITUL(2) +/* Placeholder for bit 3: SER_RS485_RTS_BEFORE_SEND, which isn't used anymore */ +#define SER_RS485_RX_DURING_TX _BITUL(4) +#define SER_RS485_TERMINATE_BUS _BITUL(5) +#define SER_RS485_ADDRB _BITUL(6) +#define SER_RS485_ADDR_RECV _BITUL(7) +#define SER_RS485_ADDR_DEST _BITUL(8) +#define SER_RS485_MODE_RS422 _BITUL(9) __u32 delay_rts_before_send; __u32 delay_rts_after_send; diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index add349889d0a..9c007a106330 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -1,22 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* - * linux/drivers/char/serial_core.h - * * Copyright (C) 2000 Deep Blue Solutions Ltd. 
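The new SER_RS485_MODE_RS422 bit from serial.h above is set through the long-standing TIOCSRS485 path; a sketch on an already-open tty descriptor:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

static int enable_rs422(int tty_fd)
{
	struct serial_rs485 rs485;

	memset(&rs485, 0, sizeof(rs485));
	/* RS422 mode requires SER_RS485_ENABLED, per the flag docs */
	rs485.flags = SER_RS485_ENABLED | SER_RS485_MODE_RS422;
	return ioctl(tty_fd, TIOCSRS485, &rs485);
}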
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _UAPILINUX_SERIAL_CORE_H #define _UAPILINUX_SERIAL_CORE_H @@ -27,6 +11,8 @@ * The type definitions. These are from Ted Ts'o's serial.h * By historical reasons the values from 0 to 13 are defined * in the include/uapi/linux/serial.h, do not define them here. + * Values 0 to 19 are used by setserial from busybox and must never + * be modified. */ #define PORT_NS16550A 14 #define PORT_XSCALE 15 @@ -245,4 +231,7 @@ /* Sunplus UART */ #define PORT_SUNPLUS 123 +/* Generic type identifier for ports which type is not important to userspace. */ +#define PORT_GENERIC (-1) + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h index 08b3527e1b93..9c987b04e2d0 100644 --- a/include/uapi/linux/serial_reg.h +++ b/include/uapi/linux/serial_reg.h @@ -49,6 +49,7 @@ #define UART_IIR_FIFO_ENABLED_8250 0x00 /* 8250: no FIFO */ #define UART_IIR_FIFO_ENABLED_16550 0x80 /* 16550: (broken/unusable) FIFO */ #define UART_IIR_FIFO_ENABLED_16550A 0xc0 /* 16550A: FIFO enabled */ +#define UART_IIR_FIFO_ENABLED_16750 0xe0 /* 16750: 64 bytes FIFO enabled */ #define UART_FCR 2 /* Out: FIFO Control Register */ #define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */ diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h index 2aa39112cf8d..154a87a1eca9 100644 --- a/include/uapi/linux/sev-guest.h +++ b/include/uapi/linux/sev-guest.h @@ -14,9 +14,11 @@ #include <linux/types.h> +#define SNP_REPORT_USER_DATA_SIZE 64 + struct snp_report_req { /* user data that should be included in the report */ - __u8 user_data[64]; + __u8 user_data[SNP_REPORT_USER_DATA_SIZE]; /* The vmpl level to be included in the report */ __u32 vmpl; diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index 837fcd4b0abc..b531e3ef011a 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -160,6 +160,8 @@ enum { SMC_NLA_LGR_D_CHID, /* u16 */ SMC_NLA_LGR_D_PAD, /* flag */ SMC_NLA_LGR_D_V2_COMMON, /* nest */ + SMC_NLA_LGR_D_EXT_GID, /* u64 */ + SMC_NLA_LGR_D_PEER_EXT_GID, /* u64 */ __SMC_NLA_LGR_D_MAX, SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1 }; diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index 8cb3a6fef553..58eceb7f5df2 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h @@ -107,6 +107,8 @@ struct smcd_diag_dmbinfo { /* SMC-D Socket internals */ __aligned_u64 my_gid; /* My GID */ __aligned_u64 token; /* Token of DMB */ __aligned_u64 peer_token; /* Token of remote DMBE */ + __aligned_u64 peer_gid_ext; /* Peer GID (extended part) */ + __aligned_u64 my_gid_ext; /* My GID (extended part) */ }; #endif /* _UAPI_SMC_DIAG_H_ */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 26f33a4c253d..a0819c6a5988 100644 --- a/include/uapi/linux/snmp.h +++ 
b/include/uapi/linux/snmp.h @@ -24,7 +24,7 @@ enum IPSTATS_MIB_INOCTETS, /* InOctets */ IPSTATS_MIB_INDELIVERS, /* InDelivers */ IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */ - IPSTATS_MIB_OUTPKTS, /* OutRequests */ + IPSTATS_MIB_OUTREQUESTS, /* OutRequests */ IPSTATS_MIB_OUTOCTETS, /* OutOctets */ /* other fields */ IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */ @@ -57,6 +57,7 @@ enum IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */ IPSTATS_MIB_CEPKTS, /* InCEPkts */ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */ + IPSTATS_MIB_OUTPKTS, /* OutTransmits */ __IPSTATS_MIB_MAX }; @@ -296,6 +297,11 @@ enum LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */ LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */ LINUX_MIB_TCPPLBREHASH, /* TCPPLBRehash */ + LINUX_MIB_TCPAOREQUIRED, /* TCPAORequired */ + LINUX_MIB_TCPAOBAD, /* TCPAOBad */ + LINUX_MIB_TCPAOKEYNOTFOUND, /* TCPAOKeyNotFound */ + LINUX_MIB_TCPAOGOOD, /* TCPAOGood */ + LINUX_MIB_TCPAODROPPEDICMPS, /* TCPAODroppedIcmps */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 7cab2c65d3d7..2f2ee82d5517 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -154,6 +154,7 @@ struct statx { #define STATX_BTIME 0x00000800U /* Want/got stx_btime */ #define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ #define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */ +#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index 7c3fc3980881..2ec6f35cda32 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -27,8 +27,13 @@ union { \ struct { MEMBERS } ATTRS; \ struct TAG { MEMBERS } ATTRS NAME; \ - } + } ATTRS +#ifdef __cplusplus +/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */ +#define __DECLARE_FLEX_ARRAY(T, member) \ + T member[0] +#else /** * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union * @@ -49,3 +54,5 @@ #ifndef __counted_by #define __counted_by(m) #endif + +#endif /* _UAPI_LINUX_STDDEF_H */ diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h index ff0a931833e2..ff1f38889dcf 100644 --- a/include/uapi/linux/sync_file.h +++ b/include/uapi/linux/sync_file.h @@ -76,6 +76,27 @@ struct sync_file_info { __u64 sync_fence_info; }; +/** + * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE - set a deadline hint on a fence + * @deadline_ns: absolute time of the deadline + * @pad: must be zero + * + * Allows userspace to set a deadline on a fence, see &dma_fence_set_deadline + * + * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank). 
For + * example + * + * clock_gettime(CLOCK_MONOTONIC, &t); + * deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline + */ +struct sync_set_deadline { + __u64 deadline_ns; + /* Not strictly needed for alignment but gives some possibility + * for future extension: + */ + __u64 pad; +}; + #define SYNC_IOC_MAGIC '>' /* @@ -87,5 +108,6 @@ struct sync_file_info { #define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data) #define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info) +#define SYNC_IOC_SET_DEADLINE _IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline) #endif /* _UAPI_LINUX_SYNC_H */ diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h deleted file mode 100644 index c48d7da6750d..000000000000 --- a/include/uapi/linux/tc_act/tc_ipt.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef __LINUX_TC_IPT_H -#define __LINUX_TC_IPT_H - -#include <linux/pkt_cls.h> - -enum { - TCA_IPT_UNSPEC, - TCA_IPT_TABLE, - TCA_IPT_HOOK, - TCA_IPT_INDEX, - TCA_IPT_CNT, - TCA_IPT_TM, - TCA_IPT_TARG, - TCA_IPT_PAD, - __TCA_IPT_MAX -}; -#define TCA_IPT_MAX (__TCA_IPT_MAX - 1) - -#endif diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h index 2500a0005d05..c61e76f3c23b 100644 --- a/include/uapi/linux/tc_act/tc_mirred.h +++ b/include/uapi/linux/tc_act/tc_mirred.h @@ -21,6 +21,7 @@ enum { TCA_MIRRED_TM, TCA_MIRRED_PARMS, TCA_MIRRED_PAD, + TCA_MIRRED_BLOCKID, __TCA_MIRRED_MAX }; #define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1) diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 879eeb0a084b..c07e9f90c084 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -129,6 +129,11 @@ enum { #define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */ +#define TCP_AO_ADD_KEY 38 /* Add/Set MKT */ +#define TCP_AO_DEL_KEY 39 /* Delete MKT */ +#define TCP_AO_INFO 40 /* Set/list TCP-AO per-socket options */ +#define TCP_AO_GET_KEYS 41 /* List MKT(s) */ +#define TCP_AO_REPAIR 42 /* Get/Set SNEs and ISNs */ #define TCP_REPAIR_ON 1 #define TCP_REPAIR_OFF 0 @@ -170,6 +175,7 @@ enum tcp_fastopen_client_fail { #define TCPI_OPT_ECN 8 /* ECN was negociated at TCP session init */ #define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */ #define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */ +#define TCPI_OPT_USEC_TS 64 /* usec timestamps */ /* * Sender's congestion state indicating normal or abnormal situations @@ -289,6 +295,18 @@ struct tcp_info { */ __u32 tcpi_rehash; /* PLB or timeout triggered rehash attempts */ + + __u16 tcpi_total_rto; /* Total number of RTO timeouts, including + * SYN/SYN-ACK and recurring timeouts. + */ + __u16 tcpi_total_rto_recoveries; /* Total number of RTO + * recoveries, including any + * unfinished recovery. + */ + __u32 tcpi_total_rto_time; /* Total time spent in RTO recoveries + * in milliseconds, including any + * unfinished recovery. 
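Following the CLOCK_MONOTONIC recipe in the sync_file comment above, a sketch that asks for a deadline 16 ms out on a sync-file fd:

#include <sys/ioctl.h>
#include <time.h>
#include <linux/sync_file.h>

static int sync_file_set_deadline_16ms(int sync_fd)
{
	struct sync_set_deadline args = { .pad = 0 };
	struct timespec t;

	clock_gettime(CLOCK_MONOTONIC, &t);
	args.deadline_ns = t.tv_sec * 1000000000ULL + t.tv_nsec
			 + 16000000ULL;	/* roughly one 60 Hz frame from now */
	return ioctl(sync_fd, SYNC_IOC_SET_DEADLINE, &args);
}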
+ */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ @@ -348,6 +366,106 @@ struct tcp_diag_md5sig { __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; }; +#define TCP_AO_MAXKEYLEN 80 + +#define TCP_AO_KEYF_IFINDEX (1 << 0) /* L3 ifindex for VRF */ +#define TCP_AO_KEYF_EXCLUDE_OPT (1 << 1) /* "Indicates whether TCP + * options other than TCP-AO + * are included in the MAC + * calculation" + */ + +struct tcp_ao_add { /* setsockopt(TCP_AO_ADD_KEY) */ + struct __kernel_sockaddr_storage addr; /* peer's address for the key */ + char alg_name[64]; /* crypto hash algorithm to use */ + __s32 ifindex; /* L3 dev index for VRF */ + __u32 set_current :1, /* set key as Current_key at once */ + set_rnext :1, /* request it from peer with RNext_key */ + reserved :30; /* must be 0 */ + __u16 reserved2; /* padding, must be 0 */ + __u8 prefix; /* peer's address prefix */ + __u8 sndid; /* SendID for outgoing segments */ + __u8 rcvid; /* RecvID to match for incoming seg */ + __u8 maclen; /* length of authentication code (hash) */ + __u8 keyflags; /* see TCP_AO_KEYF_ */ + __u8 keylen; /* length of ::key */ + __u8 key[TCP_AO_MAXKEYLEN]; +} __attribute__((aligned(8))); + +struct tcp_ao_del { /* setsockopt(TCP_AO_DEL_KEY) */ + struct __kernel_sockaddr_storage addr; /* peer's address for the key */ + __s32 ifindex; /* L3 dev index for VRF */ + __u32 set_current :1, /* corresponding ::current_key */ + set_rnext :1, /* corresponding ::rnext */ + del_async :1, /* only valid for listen sockets */ + reserved :29; /* must be 0 */ + __u16 reserved2; /* padding, must be 0 */ + __u8 prefix; /* peer's address prefix */ + __u8 sndid; /* SendID for outgoing segments */ + __u8 rcvid; /* RecvID to match for incoming seg */ + __u8 current_key; /* KeyID to set as Current_key */ + __u8 rnext; /* KeyID to set as Rnext_key */ + __u8 keyflags; /* see TCP_AO_KEYF_ */ +} __attribute__((aligned(8))); + +struct tcp_ao_info_opt { /* setsockopt(TCP_AO_INFO), getsockopt(TCP_AO_INFO) */ + /* Here 'in' is for setsockopt(), 'out' is for getsockopt() */ + __u32 set_current :1, /* in/out: corresponding ::current_key */ + set_rnext :1, /* in/out: corresponding ::rnext */ + ao_required :1, /* in/out: don't accept non-AO connects */ + set_counters :1, /* in: set/clear ::pkt_* counters */ + accept_icmps :1, /* in/out: accept incoming ICMPs */ + reserved :27; /* must be 0 */ + __u16 reserved2; /* padding, must be 0 */ + __u8 current_key; /* in/out: KeyID of Current_key */ + __u8 rnext; /* in/out: keyid of RNext_key */ + __u64 pkt_good; /* in/out: verified segments */ + __u64 pkt_bad; /* in/out: failed verification */ + __u64 pkt_key_not_found; /* in/out: could not find a key to verify */ + __u64 pkt_ao_required; /* in/out: segments missing TCP-AO sign */ + __u64 pkt_dropped_icmp; /* in/out: ICMPs that were ignored */ +} __attribute__((aligned(8))); + +struct tcp_ao_getsockopt { /* getsockopt(TCP_AO_GET_KEYS) */ + struct __kernel_sockaddr_storage addr; /* in/out: dump keys for peer + * with this address/prefix + */ + char alg_name[64]; /* out: crypto hash algorithm */ + __u8 key[TCP_AO_MAXKEYLEN]; + __u32 nkeys; /* in: size of the userspace buffer + * @optval, measured in @optlen - the + * sizeof(struct tcp_ao_getsockopt) + * out: number of keys that matched + */ + __u16 is_current :1, /* in: match and dump Current_key, + * out: the dumped key is Current_key + */ + + is_rnext :1, /* in: match and dump RNext_key, + * out: the dumped key is RNext_key + */ + get_all :1, /* in: dump all keys */ + reserved :13; /* padding, must be 0 */ + __u8 sndid; /* 
in/out: dump keys with SendID */ + __u8 rcvid; /* in/out: dump keys with RecvID */ + __u8 prefix; /* in/out: dump keys with address/prefix */ + __u8 maclen; /* out: key's length of authentication + * code (hash) + */ + __u8 keyflags; /* in/out: see TCP_AO_KEYF_ */ + __u8 keylen; /* out: length of ::key */ + __s32 ifindex; /* in/out: L3 dev index for VRF */ + __u64 pkt_good; /* out: verified segments */ + __u64 pkt_bad; /* out: segments that failed verification */ +} __attribute__((aligned(8))); + +struct tcp_ao_repair { /* {s,g}etsockopt(TCP_AO_REPAIR) */ + __be32 snt_isn; + __be32 rcv_isn; + __u32 snd_sne; + __u32 rcv_sne; +} __attribute__((aligned(8))); + /* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */ #define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1 diff --git a/include/uapi/linux/thp7312.h b/include/uapi/linux/thp7312.h new file mode 100644 index 000000000000..2b629e05daf9 --- /dev/null +++ b/include/uapi/linux/thp7312.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * THine THP7312 user space header file. + * + * Copyright (C) 2021 THine Electronics, Inc. + * Copyright (C) 2023 Ideas on Board Oy + */ + +#ifndef __UAPI_THP7312_H_ +#define __UAPI_THP7312_H_ + +#include <linux/v4l2-controls.h> + +#define V4L2_CID_THP7312_LOW_LIGHT_COMPENSATION (V4L2_CID_USER_THP7312_BASE + 0x01) +#define V4L2_CID_THP7312_AUTO_FOCUS_METHOD (V4L2_CID_USER_THP7312_BASE + 0x02) +#define V4L2_CID_THP7312_NOISE_REDUCTION_AUTO (V4L2_CID_USER_THP7312_BASE + 0x03) +#define V4L2_CID_THP7312_NOISE_REDUCTION_ABSOLUTE (V4L2_CID_USER_THP7312_BASE + 0x04) + +#endif /* __UAPI_THP7312_H_ */ diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h index d77ee6b65328..078098e73fd3 100644 --- a/include/uapi/linux/usb/functionfs.h +++ b/include/uapi/linux/usb/functionfs.h @@ -73,8 +73,10 @@ struct usb_os_desc_header { struct usb_ext_compat_desc { __u8 bFirstInterfaceNumber; __u8 Reserved1; - __u8 CompatibleID[8]; - __u8 SubCompatibleID[8]; + __struct_group(/* no tag */, IDs, /* no attrs */, + __u8 CompatibleID[8]; + __u8 SubCompatibleID[8]; + ); __u8 Reserved2[6]; }; diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h index c7d2199134d7..f0224a8dc858 100644 --- a/include/uapi/linux/usb/raw_gadget.h +++ b/include/uapi/linux/usb/raw_gadget.h @@ -44,6 +44,16 @@ enum usb_raw_event_type { /* This event is queued when a new control request arrived to ep0. */ USB_RAW_EVENT_CONTROL = 2, + /* + * These events are queued when the gadget driver is suspended, + * resumed, reset, or disconnected. Note that some UDCs (e.g. dwc2) + * report a disconnect event instead of a reset. + */ + USB_RAW_EVENT_SUSPEND = 3, + USB_RAW_EVENT_RESUME = 4, + USB_RAW_EVENT_RESET = 5, + USB_RAW_EVENT_DISCONNECT = 6, + /* The list might grow in the future. */ }; @@ -54,8 +64,8 @@ enum usb_raw_event_type { * actual length of the fetched event data. * @data: A buffer to store the fetched event data. * - * Currently the fetched data buffer is empty for USB_RAW_EVENT_CONNECT, - * and contains struct usb_ctrlrequest for USB_RAW_EVENT_CONTROL. + * The fetched event data buffer contains struct usb_ctrlrequest for + * USB_RAW_EVENT_CONTROL and is empty for other events. 
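To make the TCP-AO flow above concrete, a sketch of installing one Master Key Tuple before connect(); the peer address, Send/RecvIDs and key bytes are placeholders, and "hmac(sha1)" is the RFC 5925 default algorithm:

#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>

static int tcp_ao_install_key(int sk, const struct sockaddr_in *peer)
{
	struct tcp_ao_add key;

	memset(&key, 0, sizeof(key));
	memcpy(&key.addr, peer, sizeof(*peer));
	strcpy(key.alg_name, "hmac(sha1)");
	key.prefix = 32;		/* match this exact peer address */
	key.sndid = 100;		/* placeholder SendID */
	key.rcvid = 100;		/* placeholder RecvID */
	key.keylen = 16;
	memcpy(key.key, "0123456789abcdef", 16);	/* placeholder key */
	return setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY,
			  &key, sizeof(key));
}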
*/ struct usb_raw_event { __u32 type; diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h index 2984aae4a2b4..f74f3aedd49c 100644 --- a/include/uapi/linux/user_events.h +++ b/include/uapi/linux/user_events.h @@ -17,6 +17,15 @@ /* Create dynamic location entry within a 32-bit value */ #define DYN_LOC(offset, size) ((size) << 16 | (offset)) +/* List of supported registration flags */ +enum user_reg_flag { + /* Event will not delete upon last reference closing */ + USER_EVENT_REG_PERSIST = 1U << 0, + + /* This value or above is currently non-ABI */ + USER_EVENT_REG_MAX = 1U << 1, +}; + /* * Describes an event registration and stores the results of the registration. * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum @@ -33,7 +42,7 @@ struct user_reg { /* Input: Enable size in bytes at address */ __u8 enable_size; - /* Input: Flags for future use, set to 0 */ + /* Input: Flags to use, if any */ __u16 flags; /* Input: Address to update when enabled */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 62151706c5a3..2841e4ea8f2c 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -40,7 +40,9 @@ UFFD_FEATURE_EXACT_ADDRESS | \ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ UFFD_FEATURE_WP_UNPOPULATED | \ - UFFD_FEATURE_POISON) + UFFD_FEATURE_POISON | \ + UFFD_FEATURE_WP_ASYNC | \ + UFFD_FEATURE_MOVE) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -49,6 +51,7 @@ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ (__u64)1 << _UFFDIO_ZEROPAGE | \ + (__u64)1 << _UFFDIO_MOVE | \ (__u64)1 << _UFFDIO_WRITEPROTECT | \ (__u64)1 << _UFFDIO_CONTINUE | \ (__u64)1 << _UFFDIO_POISON) @@ -72,6 +75,7 @@ #define _UFFDIO_WAKE (0x02) #define _UFFDIO_COPY (0x03) #define _UFFDIO_ZEROPAGE (0x04) +#define _UFFDIO_MOVE (0x05) #define _UFFDIO_WRITEPROTECT (0x06) #define _UFFDIO_CONTINUE (0x07) #define _UFFDIO_POISON (0x08) @@ -91,6 +95,8 @@ struct uffdio_copy) #define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \ struct uffdio_zeropage) +#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \ + struct uffdio_move) #define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \ struct uffdio_writeprotect) #define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \ @@ -216,6 +222,14 @@ struct uffdio_api { * (i.e. empty ptes). This will be the default behavior for shmem * & hugetlbfs, so this flag only affects anonymous memory behavior * when userfault write-protection mode is registered. + * + * UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection + * asynchronous mode is supported in which the write fault is + * automatically resolved and write-protection is un-set. + * It implies UFFD_FEATURE_WP_UNPOPULATED. + * + * UFFD_FEATURE_MOVE indicates that the kernel supports moving an + * existing page contents from userspace. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -232,6 +246,8 @@ struct uffdio_api { #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) #define UFFD_FEATURE_WP_UNPOPULATED (1<<13) #define UFFD_FEATURE_POISON (1<<14) +#define UFFD_FEATURE_WP_ASYNC (1<<15) +#define UFFD_FEATURE_MOVE (1<<16) __u64 features; __u64 ioctls; @@ -340,6 +356,24 @@ struct uffdio_poison { __s64 updated; }; +struct uffdio_move { + __u64 dst; + __u64 src; + __u64 len; + /* + * Especially if used to atomically remove memory from the + * address space the wake on the dst range is not needed. 
+ */ +#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0) +#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1) + __u64 mode; + /* + * "move" is written by the ioctl and must be at the end: the + * copy_from_user will not read the last 8 bytes. + */ + __s64 move; +}; + /* * Flags for the userfaultfd(2) system call itself. */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index c3604a0a3e30..99c3f5e99da7 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -203,6 +203,18 @@ enum v4l2_colorfx { */ #define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0) +/* + * The base for Nuvoton NPCM driver controls. + * We reserve 16 controls for this driver. + */ +#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0) + +/* + * The base for THine THP7312 driver controls. + * We reserve 32 controls for this driver. + */ +#define V4L2_CID_USER_THP7312_BASE (V4L2_CID_USER_BASE + 0x11c0) + /* MPEG-class control IDs */ /* The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical */ diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index 4a195b68f28f..7048c51581c6 100644 --- a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -116,13 +116,15 @@ struct v4l2_subdev_frame_size_enum { * @pad: pad number, as reported by the media API * @interval: frame interval in seconds * @stream: stream number, defined in subdev routing + * @which: interval type (from enum v4l2_subdev_format_whence) * @reserved: drivers and applications must zero this array */ struct v4l2_subdev_frame_interval { __u32 pad; struct v4l2_fract interval; __u32 stream; - __u32 reserved[8]; + __u32 which; + __u32 reserved[7]; }; /** @@ -133,7 +135,7 @@ struct v4l2_subdev_frame_interval { * @width: frame width in pixels * @height: frame height in pixels * @interval: frame interval in seconds - * @which: format type (from enum v4l2_subdev_format_whence) + * @which: interval type (from enum v4l2_subdev_format_whence) * @stream: stream number, defined in subdev routing * @reserved: drivers and applications must zero this array */ @@ -239,7 +241,14 @@ struct v4l2_subdev_routing { * set (which is the default), the 'stream' fields will be forced to 0 by the * kernel. */ - #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0) +#define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0) + +/* + * The client is aware of the struct v4l2_subdev_frame_interval which field. If + * this is not set (which is the default), the which field is forced to + * V4L2_SUBDEV_FORMAT_ACTIVE by the kernel. 
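A sketch of the new move operation, on a userfaultfd opened with UFFD_FEATURE_MOVE and with the source range registered:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static long uffd_move(int uffd, unsigned long dst, unsigned long src,
		      unsigned long len)
{
	struct uffdio_move move = {
		.dst = dst,
		.src = src,
		.len = len,
		.mode = 0,	/* or UFFDIO_MOVE_MODE_DONTWAKE, etc. */
	};

	if (ioctl(uffd, UFFDIO_MOVE, &move))
		return -1;
	return move.move;	/* result written back by the kernel */
}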
+ */ +#define V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH (1ULL << 1) /** * struct v4l2_subdev_client_capability - Capabilities of the client accessing diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index afc1369216d9..2b68e6cdf190 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -277,8 +277,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */ __u32 index; /* Region index */ __u32 cap_offset; /* Offset within info struct of first cap */ - __u64 size; /* Region size (bytes) */ - __u64 offset; /* Region offset from start of device fd */ + __aligned_u64 size; /* Region size (bytes) */ + __aligned_u64 offset; /* Region offset from start of device fd */ }; #define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8) @@ -294,8 +294,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1 struct vfio_region_sparse_mmap_area { - __u64 offset; /* Offset of mmap'able area within region */ - __u64 size; /* Size of mmap'able area */ + __aligned_u64 offset; /* Offset of mmap'able area within region */ + __aligned_u64 size; /* Size of mmap'able area */ }; struct vfio_region_info_cap_sparse_mmap { @@ -450,9 +450,9 @@ struct vfio_device_migration_info { VFIO_DEVICE_STATE_V1_RESUMING) __u32 reserved; - __u64 pending_bytes; - __u64 data_offset; - __u64 data_size; + __aligned_u64 pending_bytes; + __aligned_u64 data_offset; + __aligned_u64 data_size; }; /* @@ -476,7 +476,7 @@ struct vfio_device_migration_info { struct vfio_region_info_cap_nvlink2_ssatgt { struct vfio_info_cap_header header; - __u64 tgt; + __aligned_u64 tgt; }; /* @@ -816,7 +816,7 @@ struct vfio_device_gfx_plane_info { __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */ /* out */ __u32 drm_format; /* drm format of plane */ - __u64 drm_format_mod; /* tiled mode */ + __aligned_u64 drm_format_mod; /* tiled mode */ __u32 width; /* width of plane */ __u32 height; /* height of plane */ __u32 stride; /* stride of plane */ @@ -829,6 +829,7 @@ struct vfio_device_gfx_plane_info { __u32 region_index; /* region index */ __u32 dmabuf_id; /* dma-buf id */ }; + __u32 reserved; }; #define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14) @@ -863,9 +864,10 @@ struct vfio_device_ioeventfd { #define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */ #define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */ #define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf) - __u64 offset; /* device fd offset of write */ - __u64 data; /* data to be written */ + __aligned_u64 offset; /* device fd offset of write */ + __aligned_u64 data; /* data to be written */ __s32 fd; /* -1 for de-assignment */ + __u32 reserved; }; #define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) @@ -1217,6 +1219,7 @@ enum vfio_device_mig_state { VFIO_DEVICE_STATE_RUNNING_P2P = 5, VFIO_DEVICE_STATE_PRE_COPY = 6, VFIO_DEVICE_STATE_PRE_COPY_P2P = 7, + VFIO_DEVICE_STATE_NR, }; /** @@ -1434,6 +1437,27 @@ struct vfio_device_feature_mig_data_size { #define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9 +/** + * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device + * based on the operation specified in op flag. + * + * The functionality is incorporated for devices that needs bus master control, + * but the in-band device interface lacks the support. Consequently, it is not + * applicable to PCI devices, as bus master control for PCI devices is managed + * in-band through the configuration space. At present, this feature is supported + * only for CDX devices. 
+ * Configuring the device's BUS MASTER setting as CLEAR blocks all incoming DMA
+ * requests from the device, while configuring it as SET (enable) allows the
+ * device to perform DMA to host memory.
+ */ +struct vfio_device_feature_bus_master { + __u32 op; +#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */ +#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */ +}; +#define VFIO_DEVICE_FEATURE_BUS_MASTER 10 + /* -------- API for Type1 VFIO IOMMU -------- */ /** @@ -1449,7 +1473,7 @@ struct vfio_iommu_type1_info { __u32 flags; #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ #define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */ - __u64 iova_pgsizes; /* Bitmap of supported page sizes */ + __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */ __u32 cap_offset; /* Offset within info struct of first cap */ __u32 pad; }; diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index f5c48b61ab62..649560c685f1 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -219,4 +219,12 @@ */ #define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E) +/* Get the group for the descriptor table including driver & device areas + * of a virtqueue: read index, write group in num. + * The virtqueue index is stored in the index field of vhost_vring_state. + * The group ID of the descriptor table for this specific virtqueue + * is returned via the num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \ + struct vhost_vring_state) #endif diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h index 2d827d22cd99..d7656908f730 100644 --- a/include/uapi/linux/vhost_types.h +++ b/include/uapi/linux/vhost_types.h @@ -185,5 +185,12 @@ struct vhost_vdpa_iova_range { * DRIVER_OK */ #define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6 +/* Device may expose the virtqueue's descriptor area, driver area and + * device area to a different group for ASID binding than where its + * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
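A hedged sketch of driving the VFIO_DEVICE_FEATURE_BUS_MASTER feature above from userspace, assuming a device fd obtained through the usual vfio flow and the standard vfio_device_feature header-plus-payload buffer layout; vfio_set_bus_master is an illustrative helper name, not an existing API:

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int vfio_set_bus_master(int device_fd, __u32 op)
{
	char buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_bus_master)];
	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
	struct vfio_device_feature_bus_master *bm =
		(struct vfio_device_feature_bus_master *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_BUS_MASTER;
	bm->op = op;	/* VFIO_DEVICE_FEATURE_SET_MASTER or _CLEAR_MASTER */

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}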
+ */ +#define VHOST_BACKEND_F_DESC_ASID 0x7 +/* IOTLB mappings are not flushed across device reset */ +#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8 #endif diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 78260e5d9985..68e7ac178cc2 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -804,6 +804,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */ #define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */ #define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */ +#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */ /* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ #define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */ @@ -1034,6 +1035,7 @@ struct v4l2_requestbuffers { #define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4) #define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5) #define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6) +#define V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS (1 << 7) /** * struct v4l2_plane - plane info for multi-planar buffers @@ -1815,7 +1817,7 @@ struct v4l2_ext_control { __s64 __user *p_s64; struct v4l2_area __user *p_area; struct v4l2_ctrl_h264_sps __user *p_h264_sps; - struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_pps __user *p_h264_pps; struct v4l2_ctrl_h264_scaling_matrix __user *p_h264_scaling_matrix; struct v4l2_ctrl_h264_pred_weights __user *p_h264_pred_weights; struct v4l2_ctrl_h264_slice_params __user *p_h264_slice_params; @@ -1836,6 +1838,8 @@ struct v4l2_ext_control { struct v4l2_ctrl_av1_tile_group_entry __user *p_av1_tile_group_entry; struct v4l2_ctrl_av1_frame __user *p_av1_frame; struct v4l2_ctrl_av1_film_grain __user *p_av1_film_grain; + struct v4l2_ctrl_hdr10_cll_info __user *p_hdr10_cll_info; + struct v4l2_ctrl_hdr10_mastering_display __user *p_hdr10_mastering_display; void __user *ptr; }; } __attribute__ ((packed)); @@ -2604,6 +2608,9 @@ struct v4l2_dbg_chip_info { * @flags: additional buffer management attributes (ignored unless the * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability * and configured for MMAP streaming I/O). + * @max_num_buffers: if the V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability flag is set, + * this field indicates the maximum possible number of buffers + * for this queue. * @reserved: future extensions */ struct v4l2_create_buffers { @@ -2613,7 +2620,8 @@ struct v4l2_create_buffers { struct v4l2_format format; __u32 capabilities; __u32 flags; - __u32 reserved[6]; + __u32 max_num_buffers; + __u32 reserved[5]; }; /* diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 2c712c654165..2445f365bce7 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -52,7 +52,7 @@ * rest are per-device feature bits. */ #define VIRTIO_TRANSPORT_F_START 28 -#define VIRTIO_TRANSPORT_F_END 41 +#define VIRTIO_TRANSPORT_F_END 42 #ifndef VIRTIO_CONFIG_NO_LEGACY /* Do we get callbacks when the ring is completely used, even if we've @@ -105,8 +105,19 @@ */ #define VIRTIO_F_NOTIFICATION_DATA 38 +/* This feature indicates that the driver uses the data provided by the device + * as a virtqueue identifier in available buffer notifications. + */ +#define VIRTIO_F_NOTIF_CONFIG_DATA 39 + /* * This feature indicates that the driver can reset a queue individually.
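To show how the new max_num_buffers field is meant to be consumed, a small sketch using the documented count == 0 query form of VIDIOC_CREATE_BUFS; the capture buffer type and the fallback of 32 (the traditional VIDEO_MAX_FRAME limit) are assumptions for illustration:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static __u32 query_max_buffers(int fd)
{
	struct v4l2_create_buffers cb;

	/* count == 0 only queries capabilities, nothing is allocated. */
	memset(&cb, 0, sizeof(cb));
	cb.count = 0;
	cb.memory = V4L2_MEMORY_MMAP;
	cb.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	if (ioctl(fd, VIDIOC_CREATE_BUFS, &cb) < 0)
		return 0;
	if (cb.capabilities & V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS)
		return cb.max_num_buffers;
	return 32;	/* historical VIDEO_MAX_FRAME default */
}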
*/ #define VIRTIO_F_RING_RESET 40 + +/* + * This feature indicates that the device supports administration virtqueues. + */ +#define VIRTIO_F_ADMIN_VQ 41 + #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index f703afc7ad31..ef3810dee7ef 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -166,6 +166,20 @@ struct virtio_pci_common_cfg { __le32 queue_used_hi; /* read-write */ }; +/* + * Warning: do not use sizeof on this: use offsetofend for + * specific fields you need. + */ +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + + __le16 queue_notify_data; /* read-write */ + __le16 queue_reset; /* read-write */ + + __le16 admin_queue_index; /* read-only */ + __le16 admin_queue_num; /* read-only */ +}; + /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ struct virtio_pci_cfg_cap { struct virtio_pci_cap cap; @@ -204,7 +218,72 @@ struct virtio_pci_cfg_cap { #define VIRTIO_PCI_COMMON_Q_USEDHI 52 #define VIRTIO_PCI_COMMON_Q_NDATA 56 #define VIRTIO_PCI_COMMON_Q_RESET 58 +#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60 +#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62 #endif /* VIRTIO_PCI_NO_MODERN */ +/* Admin command status. */ +#define VIRTIO_ADMIN_STATUS_OK 0 + +/* Admin command opcode. */ +#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0 +#define VIRTIO_ADMIN_CMD_LIST_USE 0x1 + +/* Admin command group type. */ +#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1 + +/* Transitional device admin command. */ +#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2 +#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3 +#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4 +#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5 +#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6 + +struct __packed virtio_admin_cmd_hdr { + __le16 opcode; + /* + * 1 - SR-IOV + * 2-65535 - reserved + */ + __le16 group_type; + /* Unused, reserved for future extensions. */ + __u8 reserved1[12]; + __le64 group_member_id; +}; + +struct __packed virtio_admin_cmd_status { + __le16 status; + __le16 status_qualifier; + /* Unused, reserved for future extensions. */ + __u8 reserved2[4]; +}; + +struct __packed virtio_admin_cmd_legacy_wr_data { + __u8 offset; /* Starting offset of the register(s) to write. */ + __u8 reserved[7]; + __u8 registers[]; +}; + +struct __packed virtio_admin_cmd_legacy_rd_data { + __u8 offset; /* Starting offset of the register(s) to read. */ +}; + +#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0 +#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1 +#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2 + +#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4 + +struct __packed virtio_admin_cmd_notify_info_data { + __u8 flags; /* 0 = end of list, 1 = owner device, 2 = member device */ + __u8 bar; /* BAR of the member or the owner device */ + __u8 padding[6]; + __le64 offset; /* Offset within bar.
*/ +}; + +struct virtio_admin_cmd_notify_info_result { + struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO]; +}; + #endif diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h index d676b3620383..ede4f3564977 100644 --- a/include/uapi/linux/virtio_pmem.h +++ b/include/uapi/linux/virtio_pmem.h @@ -14,6 +14,13 @@ #include <linux/virtio_ids.h> #include <linux/virtio_config.h> +/* Feature bits */ +/* The guest physical address range will be indicated as shared memory region 0 */ +#define VIRTIO_PMEM_F_SHMEM_REGION 0 + +/* shmid of the shared memory region corresponding to the pmem */ +#define VIRTIO_PMEM_SHMEM_REGION_ID 0 + struct virtio_pmem_config { __le64 start; __le64 size; diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h index c60ca33eac59..ed07181d4eff 100644 --- a/include/uapi/linux/vm_sockets.h +++ b/include/uapi/linux/vm_sockets.h @@ -191,4 +191,21 @@ struct sockaddr_vm { #define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9) +/* MSG_ZEROCOPY notifications are encoded in the standard error format, + * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in + * the kernel source tree for more details. + */ + +/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing + * when MSG_ZEROCOPY flag is used on transmissions. + */ + +#define SOL_VSOCK 287 + +/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing + * when MSG_ZEROCOPY flag is used on transmissions. + */ + +#define VSOCK_RECVERR 1 + #endif /* _UAPI_VM_SOCKETS_H */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 23543c33fee8..6a77328be114 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -4,6 +4,7 @@ #include <linux/in6.h> #include <linux/types.h> +#include <linux/stddef.h> /* All of the structures in this file may not change size as they are * passed into the kernel from userspace via netlink sockets. @@ -33,7 +34,7 @@ struct xfrm_sec_ctx { __u8 ctx_alg; __u16 ctx_len; __u32 ctx_sid; - char ctx_str[]; + char ctx_str[] __counted_by(ctx_len); }; /* Security Context Domains of Interpretation */ diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h index dcb179de4358..e1571603175e 100644 --- a/include/uapi/mtd/ubi-user.h +++ b/include/uapi/mtd/ubi-user.h @@ -248,6 +248,7 @@ enum { * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs * @padding: reserved for future, not used, has to be zeroed * @disable_fm: whether to disable fastmap + * @need_resv_pool: whether to reserve free PEBs for filling the pool/wl_pool * * This data structure is used to specify MTD device UBI has to attach and the * parameters it has to use.
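Following the pattern in Documentation/networking/msg_zerocopy.rst, a sketch of reaping one MSG_ZEROCOPY completion from an AF_VSOCK socket using the two constants just defined; socket creation and the SO_ZEROCOPY/MSG_ZEROCOPY transmit side are omitted:

#include <errno.h>
#include <sys/socket.h>
#include <linux/errqueue.h>
#include <linux/vm_sockets.h>

static int vsock_reap_zerocopy(int fd)
{
	char control[CMSG_SPACE(sizeof(struct sock_extended_err))];
	struct msghdr msg = {
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -errno;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_VSOCK && cm->cmsg_type == VSOCK_RECVERR) {
			struct sock_extended_err *serr =
				(struct sock_extended_err *)CMSG_DATA(cm);
			/* [ee_info, ee_data] is the completed send range. */
			return serr->ee_data - serr->ee_info + 1;
		}
	}
	return 0;
}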
The number which should be assigned to the new UBI @@ -293,7 +294,8 @@ struct ubi_attach_req { __s32 vid_hdr_offset; __s16 max_beb_per1024; __s8 disable_fm; - __s8 padding[9]; + __s8 need_resv_pool; + __s8 padding[8]; }; /* diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h index 6e7c67a0cca3..c0c34aca90ec 100644 --- a/include/uapi/rdma/bnxt_re-abi.h +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -54,6 +54,8 @@ enum { BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL, BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL, BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL, + BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL, + BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40, }; enum bnxt_re_wqe_mode { @@ -62,6 +64,14 @@ enum bnxt_re_wqe_mode { BNXT_QPLIB_WQE_MODE_INVALID = 0x02, }; +enum { + BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01, +}; + +struct bnxt_re_uctx_req { + __aligned_u64 comp_mask; +}; + struct bnxt_re_uctx_resp { __u32 dev_id; __u32 max_qp; @@ -92,11 +102,16 @@ struct bnxt_re_cq_req { __aligned_u64 cq_handle; }; +enum bnxt_re_cq_mask { + BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT = 0x1, +}; + struct bnxt_re_cq_resp { __u32 cqid; __u32 tail; __u32 phase; __u32 rsvd; + __aligned_u64 comp_mask; }; struct bnxt_re_resize_cq_req { @@ -133,6 +148,7 @@ enum bnxt_re_shpg_offt { enum bnxt_re_objects { BNXT_RE_OBJECT_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT), BNXT_RE_OBJECT_NOTIFY_DRV, + BNXT_RE_OBJECT_GET_TOGGLE_MEM, }; enum bnxt_re_alloc_page_type { @@ -161,4 +177,29 @@ enum bnxt_re_alloc_page_methods { enum bnxt_re_notify_drv_methods { BNXT_RE_METHOD_NOTIFY_DRV = (1U << UVERBS_ID_NS_SHIFT), }; + +/* Toggle mem */ + +enum bnxt_re_get_toggle_mem_type { + BNXT_RE_CQ_TOGGLE_MEM = 0, + BNXT_RE_SRQ_TOGGLE_MEM, +}; + +enum bnxt_re_var_toggle_mem_attrs { + BNXT_RE_TOGGLE_MEM_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + BNXT_RE_TOGGLE_MEM_TYPE, + BNXT_RE_TOGGLE_MEM_RES_ID, + BNXT_RE_TOGGLE_MEM_MMAP_PAGE, + BNXT_RE_TOGGLE_MEM_MMAP_OFFSET, + BNXT_RE_TOGGLE_MEM_MMAP_LENGTH, +}; + +enum bnxt_re_toggle_mem_attrs { + BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum bnxt_re_toggle_mem_methods { + BNXT_RE_METHOD_GET_TOGGLE_MEM = (1U << UVERBS_ID_NS_SHIFT), + BNXT_RE_METHOD_RELEASE_TOGGLE_MEM, +}; #endif /* __BNXT_RE_UVERBS_ABI_H__*/ diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h index d94c32f28804..701e2d567e41 100644 --- a/include/uapi/rdma/efa-abi.h +++ b/include/uapi/rdma/efa-abi.h @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ /* - * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #ifndef EFA_ABI_USER_H #define EFA_ABI_USER_H #include <linux/types.h> +#include <rdma/ib_user_ioctl_cmds.h> /* * Increment this value if any changes that break userspace ABI @@ -134,4 +135,22 @@ struct efa_ibv_ex_query_device_resp { __u32 device_caps; }; +enum { + EFA_QUERY_MR_VALIDITY_RECV_IC_ID = 1 << 0, + EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID = 1 << 1, + EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID = 1 << 2, +}; + +enum efa_query_mr_attrs { + EFA_IB_ATTR_QUERY_MR_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY, + EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID, + EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID, + EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID, +}; + +enum efa_mr_methods { + EFA_IB_METHOD_MR_QUERY = (1U << UVERBS_ID_NS_SHIFT), +}; + #endif /* EFA_ABI_USER_H */ diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index 2e68a8b0c92c..c996e151081e 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -52,15 +52,25 @@ struct hns_roce_ib_create_cq_resp { __aligned_u64 cap_flags; }; +enum hns_roce_srq_cap_flags { + HNS_ROCE_SRQ_CAP_RECORD_DB = 1 << 0, +}; + +enum hns_roce_srq_cap_flags_resp { + HNS_ROCE_RSP_SRQ_CAP_RECORD_DB = 1 << 0, +}; + struct hns_roce_ib_create_srq { __aligned_u64 buf_addr; __aligned_u64 db_addr; __aligned_u64 que_addr; + __u32 req_cap_flags; /* Use enum hns_roce_srq_cap_flags */ + __u32 reserved; }; struct hns_roce_ib_create_srq_resp { __u32 srqn; - __u32 reserved; + __u32 cap_flags; /* Use enum hns_roce_srq_cap_flags */ }; struct hns_roce_ib_create_qp { @@ -115,4 +125,9 @@ struct hns_roce_ib_alloc_pd_resp { __u32 pdn; }; +struct hns_roce_ib_create_ah_resp { + __u8 dmac[6]; + __u8 reserved[2]; +}; + #endif /* HNS_ABI_USER_H */ diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index d7c5aaa32744..fe15bc7e9f70 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -220,7 +220,8 @@ enum ib_uverbs_advise_mr_flag { struct ib_uverbs_query_port_resp_ex { struct ib_uverbs_query_port_resp legacy_resp; __u16 port_cap_flags2; - __u8 reserved[6]; + __u8 reserved[2]; + __u32 active_speed_ex; }; struct ib_uverbs_qp_cap { diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index a96b7d2770e1..d4f6a36dffb0 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -37,6 +37,7 @@ #include <linux/types.h> #include <linux/if_ether.h> /* For ETH_ALEN. 
*/ #include <rdma/ib_user_ioctl_verbs.h> +#include <rdma/mlx5_user_ioctl_verbs.h> enum { MLX5_QP_FLAG_SIGNATURE = 1 << 0, @@ -275,6 +276,7 @@ struct mlx5_ib_query_device_resp { __u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */ struct mlx5_ib_dci_streams_caps dci_streams_caps; __u16 reserved; + struct mlx5_ib_uapi_reg reg_c0; }; enum mlx5_ib_create_cq_flags { diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 7af9e09ea556..3189c7f08d17 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -64,6 +64,7 @@ enum mlx5_ib_uapi_dm_type { MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM, MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM, MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM, + MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM, }; enum mlx5_ib_uapi_devx_create_event_channel_flags { diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index e50c357367db..723bbb0f7042 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -299,6 +299,8 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_STAT_GET_STATUS, + RDMA_NLDEV_CMD_RES_SRQ_GET_RAW, + RDMA_NLDEV_NUM_OPS }; @@ -554,6 +556,8 @@ enum rdma_nldev_attr { RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, /* u32 */ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, /* u8 */ + RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, /* u8 */ + /* * Always the end */ diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h index af735f55b291..6df49724954f 100644 --- a/include/uapi/rdma/siw-abi.h +++ b/include/uapi/rdma/siw-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */ /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ diff --git a/include/uapi/regulator/regulator.h b/include/uapi/regulator/regulator.h new file mode 100644 index 000000000000..71bf71a22e7f --- /dev/null +++ b/include/uapi/regulator/regulator.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Regulator uapi header + * + * Author: Naresh Solanki <Naresh.Solanki@9elements.com> + */ + +#ifndef _UAPI_REGULATOR_H +#define _UAPI_REGULATOR_H + +#ifdef __KERNEL__ +#include <linux/types.h> +#else +#include <stdint.h> +#endif + +/* + * Regulator notifier events. + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * FORCE_DISABLE Regulator forcibly shut down by software. + * VOLTAGE_CHANGE Regulator voltage changed. + * Data passed is old voltage cast to (void *). + * DISABLE Regulator was disabled. + * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed. + * Data passed is "struct pre_voltage_change_data" + * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. + * Data passed is old voltage cast to (void *). + * PRE_DISABLE Regulator is about to be disabled + * ABORT_DISABLE Regulator disable failed for some reason + * + * NOTE: These events can be OR'ed together when passed into handler. 
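A sketch of a consumer for these notifications; it assumes the header installs as <regulator/regulator.h> and that generic netlink subscription to the reg_event family (REG_GENL_FAMILY_NAME / REG_GENL_MCAST_GROUP_NAME below) has already delivered a decoded REG_GENL_ATTR_EVENT payload, so the netlink plumbing itself is omitted:

#include <stdio.h>
#include <regulator/regulator.h>

/* Hypothetical callback; the event field is a bitmask, so test with '&'. */
static void handle_reg_event(const struct reg_genl_event *ev)
{
	if (ev->event & REGULATOR_EVENT_UNDER_VOLTAGE)
		fprintf(stderr, "%s: under-voltage\n", ev->reg_name);
	if (ev->event & REGULATOR_EVENT_WARN_MASK)
		fprintf(stderr, "%s: warning-level event 0x%llx\n",
			ev->reg_name, (unsigned long long)ev->event);
}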
+ */ + +#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01 +#define REGULATOR_EVENT_OVER_CURRENT 0x02 +#define REGULATOR_EVENT_REGULATION_OUT 0x04 +#define REGULATOR_EVENT_FAIL 0x08 +#define REGULATOR_EVENT_OVER_TEMP 0x10 +#define REGULATOR_EVENT_FORCE_DISABLE 0x20 +#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 +#define REGULATOR_EVENT_DISABLE 0x80 +#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 +#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 +#define REGULATOR_EVENT_PRE_DISABLE 0x400 +#define REGULATOR_EVENT_ABORT_DISABLE 0x800 +#define REGULATOR_EVENT_ENABLE 0x1000 +/* + * Following notifications should be emitted only if detected condition + * is such that the HW is likely to still be working but consumers should + * take a recovery action to prevent problems escalating into errors. + */ +#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000 +#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000 +#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000 +#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000 +#define REGULATOR_EVENT_WARN_MASK 0x1E000 + +struct reg_genl_event { + char reg_name[32]; + uint64_t event; +}; + +/* attributes of reg_genl_family */ +enum { + REG_GENL_ATTR_UNSPEC, + REG_GENL_ATTR_EVENT, /* reg event info needed by user space */ + __REG_GENL_ATTR_MAX, +}; + +#define REG_GENL_ATTR_MAX (__REG_GENL_ATTR_MAX - 1) + +/* commands supported by the reg_genl_family */ +enum { + REG_GENL_CMD_UNSPEC, + REG_GENL_CMD_EVENT, /* kernel->user notifications for reg events */ + __REG_GENL_CMD_MAX, +}; + +#define REG_GENL_CMD_MAX (__REG_GENL_CMD_MAX - 1) + +#define REG_GENL_FAMILY_NAME "reg_event" +#define REG_GENL_VERSION 0x01 +#define REG_GENL_MCAST_GROUP_NAME "reg_mc_group" + +#endif /* _UAPI_REGULATOR_H */ diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h index 907d345f04f9..c72ce387286a 100644 --- a/include/uapi/scsi/scsi_bsg_mpi3mr.h +++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h @@ -491,6 +491,8 @@ struct mpi3_nvme_encapsulated_error_reply { #define MPI3MR_NVME_DATA_FORMAT_PRP 0 #define MPI3MR_NVME_DATA_FORMAT_SGL1 1 #define MPI3MR_NVME_DATA_FORMAT_SGL2 2 +#define MPI3MR_NVMESGL_DATA_SEGMENT 0x00 +#define MPI3MR_NVMESGL_LAST_SEGMENT 0x03 /* MPI3: task management related definitions */ struct mpi3_scsi_task_mgmt_request { diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h index 7c7975f9905e..03f2beadf201 100644 --- a/include/uapi/scsi/scsi_bsg_ufs.h +++ b/include/uapi/scsi/scsi_bsg_ufs.h @@ -83,7 +83,7 @@ struct utp_upiu_header { union { __u8 tm_function; __u8 query_function; - }; + } __attribute__((packed)); __u8 response; __u8 status; __u8 ehs_length; diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h index b5bc8604efe8..c85fdd8895d8 100644 --- a/include/uapi/sound/asequencer.h +++ b/include/uapi/sound/asequencer.h @@ -207,7 +207,7 @@ struct snd_seq_ev_raw32 { struct snd_seq_ev_ext { unsigned int len; /* length of data */ void *ptr; /* pointer to data (note: maybe 64-bit) */ -} __attribute__((packed)); +} __packed; struct snd_seq_result { int event; /* processed event type */ @@ -251,7 +251,7 @@ struct snd_seq_ev_quote { struct snd_seq_addr origin; /* original sender */ unsigned short value; /* optional data */ struct snd_seq_event *event; /* quoted event */ -} __attribute__((packed)); +} __packed; union snd_seq_event_data { /* event data... 
*/ struct snd_seq_ev_note note; diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index f9939da41122..d5b9cfbd9cea 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -142,7 +142,7 @@ struct snd_hwdep_dsp_image { * * *****************************************************************************/ -#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 15) +#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 16) typedef unsigned long snd_pcm_uframes_t; typedef signed long snd_pcm_sframes_t; @@ -267,7 +267,10 @@ typedef int __bitwise snd_pcm_format_t; typedef int __bitwise snd_pcm_subformat_t; #define SNDRV_PCM_SUBFORMAT_STD ((__force snd_pcm_subformat_t) 0) -#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_STD +#define SNDRV_PCM_SUBFORMAT_MSBITS_MAX ((__force snd_pcm_subformat_t) 1) +#define SNDRV_PCM_SUBFORMAT_MSBITS_20 ((__force snd_pcm_subformat_t) 2) +#define SNDRV_PCM_SUBFORMAT_MSBITS_24 ((__force snd_pcm_subformat_t) 3) +#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_MSBITS_24 #define SNDRV_PCM_INFO_MMAP 0x00000001 /* hardware supports mmap */ #define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */ diff --git a/include/uapi/sound/scarlett2.h b/include/uapi/sound/scarlett2.h new file mode 100644 index 000000000000..91467ab0ed70 --- /dev/null +++ b/include/uapi/sound/scarlett2.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Focusrite Scarlett 2 Protocol Driver for ALSA + * (including Scarlett 2nd Gen, 3rd Gen, 4th Gen, Clarett USB, and + * Clarett+ series products) + * + * Copyright (c) 2023 by Geoffrey D. Bennett <g at b4.vu> + */ +#ifndef __UAPI_SOUND_SCARLETT2_H +#define __UAPI_SOUND_SCARLETT2_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define SCARLETT2_HWDEP_MAJOR 1 +#define SCARLETT2_HWDEP_MINOR 0 +#define SCARLETT2_HWDEP_SUBMINOR 0 + +#define SCARLETT2_HWDEP_VERSION \ + ((SCARLETT2_HWDEP_MAJOR << 16) | \ + (SCARLETT2_HWDEP_MINOR << 8) | \ + SCARLETT2_HWDEP_SUBMINOR) + +#define SCARLETT2_HWDEP_VERSION_MAJOR(v) (((v) >> 16) & 0xFF) +#define SCARLETT2_HWDEP_VERSION_MINOR(v) (((v) >> 8) & 0xFF) +#define SCARLETT2_HWDEP_VERSION_SUBMINOR(v) ((v) & 0xFF) + +/* Get protocol version */ +#define SCARLETT2_IOCTL_PVERSION _IOR('S', 0x60, int) + +/* Reboot */ +#define SCARLETT2_IOCTL_REBOOT _IO('S', 0x61) + +/* Select flash segment */ +#define SCARLETT2_SEGMENT_ID_SETTINGS 0 +#define SCARLETT2_SEGMENT_ID_FIRMWARE 1 +#define SCARLETT2_SEGMENT_ID_COUNT 2 + +#define SCARLETT2_IOCTL_SELECT_FLASH_SEGMENT _IOW('S', 0x62, int) + +/* Erase selected flash segment */ +#define SCARLETT2_IOCTL_ERASE_FLASH_SEGMENT _IO('S', 0x63) + +/* Get selected flash segment erase progress + * 1 through to num_blocks, or 255 for complete + */ +struct scarlett2_flash_segment_erase_progress { + unsigned char progress; + unsigned char num_blocks; +}; +#define SCARLETT2_IOCTL_GET_ERASE_PROGRESS \ + _IOR('S', 0x64, struct scarlett2_flash_segment_erase_progress) + +#endif /* __UAPI_SOUND_SCARLETT2_H */ diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h index 453cab2a1209..ee5708934614 100644 --- a/include/uapi/sound/sof/tokens.h +++ b/include/uapi/sound/sof/tokens.h @@ -35,6 +35,7 @@ /* buffers */ #define SOF_TKN_BUF_SIZE 100 #define SOF_TKN_BUF_CAPS 101 +#define SOF_TKN_BUF_FLAGS 102 /* DAI */ /* Token retired with ABI 3.2, do not use for new capabilities @@ -213,4 +214,8 @@ #define SOF_TKN_AMD_ACPI2S_CH 1701 #define SOF_TKN_AMD_ACPI2S_TDM_MODE 1702 
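As a usage note for the scarlett2 hwdep interface above, a sketch that validates the protocol version and then polls erase progress; the fd is assumed to be an open ALSA hwdep node for the device, and the busy-wait loop is for brevity only:

#include <sys/ioctl.h>
#include <sound/scarlett2.h>

static int scarlett2_wait_erase(int hwdep_fd)
{
	int ver;
	struct scarlett2_flash_segment_erase_progress p;

	if (ioctl(hwdep_fd, SCARLETT2_IOCTL_PVERSION, &ver) < 0)
		return -1;
	if (SCARLETT2_HWDEP_VERSION_MAJOR(ver) != SCARLETT2_HWDEP_MAJOR)
		return -1;	/* incompatible protocol */

	do {
		if (ioctl(hwdep_fd, SCARLETT2_IOCTL_GET_ERASE_PROGRESS, &p) < 0)
			return -1;
	} while (p.progress != 255);	/* 255 = erase complete */

	return 0;
}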
+/* MICFIL PDM */ +#define SOF_TKN_IMX_MICFIL_RATE 2000 +#define SOF_TKN_IMX_MICFIL_CH 2001 + #endif diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h index 375718ba4ab6..8b8c5d1420fe 100644 --- a/include/uapi/xen/privcmd.h +++ b/include/uapi/xen/privcmd.h @@ -102,7 +102,7 @@ struct privcmd_mmap_resource { #define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0) struct privcmd_irqfd { - void __user *dm_op; + __u64 dm_op; __u32 size; /* Size of structure pointed by dm_op */ __u32 fd; __u32 flags; @@ -110,6 +110,22 @@ struct privcmd_irqfd { __u8 pad[2]; }; +/* For privcmd_ioeventfd::flags */ +#define PRIVCMD_IOEVENTFD_FLAG_DEASSIGN (1 << 0) + +struct privcmd_ioeventfd { + __u64 ioreq; + __u64 ports; + __u64 addr; + __u32 addr_len; + __u32 event_fd; + __u32 vcpus; + __u32 vq; + __u32 flags; + domid_t dom; + __u8 pad[2]; +}; + /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t @@ -138,6 +154,8 @@ struct privcmd_irqfd { #define IOCTL_PRIVCMD_MMAP_RESOURCE \ _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource)) #define IOCTL_PRIVCMD_IRQFD \ - _IOC(_IOC_NONE, 'P', 8, sizeof(struct privcmd_irqfd)) + _IOW('P', 8, struct privcmd_irqfd) +#define IOCTL_PRIVCMD_IOEVENTFD \ + _IOW('P', 9, struct privcmd_ioeventfd) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index 0cced88f4531..b6003749bc83 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -14,6 +14,7 @@ #include <linux/bitops.h> #include <linux/types.h> #include <uapi/scsi/scsi_bsg_ufs.h> +#include <linux/time64.h> /* * Using static_assert() is not allowed in UAPI header files. Hence the check @@ -98,9 +99,10 @@ enum upiu_response_transaction { UPIU_TRANSACTION_REJECT_UPIU = 0x3F, }; -/* UPIU Read/Write flags */ +/* UPIU Read/Write flags. See also table "UPIU Flags" in the UFS standard. 
*/ enum { UPIU_CMD_FLAGS_NONE = 0x00, + UPIU_CMD_FLAGS_CP = 0x04, UPIU_CMD_FLAGS_WRITE = 0x20, UPIU_CMD_FLAGS_READ = 0x40, }; @@ -550,6 +552,14 @@ struct ufs_vreg_info { struct ufs_vreg *vdd_hba; }; +/* UFS device descriptor wPeriodicRTCUpdate bit9 defines RTC time baseline */ +#define UFS_RTC_TIME_BASELINE BIT(9) + +enum ufs_rtc_time { + UFS_RTC_RELATIVE, + UFS_RTC_ABSOLUTE +}; + struct ufs_dev_info { bool f_power_on_wp_en; /* Keeps information if any of the LU is power on write protected */ @@ -577,6 +587,11 @@ struct ufs_dev_info { /* UFS EXT_IID Enable */ bool b_ext_iid_en; + + /* UFS RTC */ + enum ufs_rtc_time rtc_type; + time64_t rtc_time_baseline; + u32 rtc_update_period; }; /* diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 7d07b256e906..8e2bce9a4f21 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -16,6 +16,7 @@ #include <linux/blk-crypto-profile.h> #include <linux/blk-mq.h> #include <linux/devfreq.h> +#include <linux/fault-inject.h> #include <linux/msi.h> #include <linux/pm_runtime.h> #include <linux/dma-direction.h> @@ -28,6 +29,7 @@ #define UFSHCD "ufshcd" +struct scsi_device; struct ufs_hba; enum dev_cmd_type { @@ -371,6 +373,7 @@ struct ufs_hba_variant_ops { int (*get_outstanding_cqs)(struct ufs_hba *hba, unsigned long *ocqs); int (*config_esi)(struct ufs_hba *hba); + void (*config_scsi_dev)(struct scsi_device *sdev); }; /* clock gating state */ @@ -427,6 +430,7 @@ struct ufs_clk_gating { * @workq: workqueue to schedule devfreq suspend/resume work * @suspend_work: worker to suspend devfreq * @resume_work: worker to resume devfreq + * @target_freq: frequency requested by devfreq framework * @min_gear: lowest HS gear to scale down to * @is_enabled: tracks if scaling is currently enabled or not, controlled by * clkscale_enable sysfs node @@ -446,6 +450,7 @@ struct ufs_clk_scaling { struct workqueue_struct *workq; struct work_struct suspend_work; struct work_struct resume_work; + unsigned long target_freq; u32 min_gear; bool is_enabled; bool is_allowed; @@ -597,11 +602,6 @@ enum ufshcd_quirks { UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13, /* - * Align DMA SG entries on a 4 KiB boundary. - */ - UFSHCD_QUIRK_4KB_DMA_ALIGNMENT = 1 << 14, - - /* * This quirk needs to be enabled if the host controller does not * support UIC command */ @@ -865,6 +865,7 @@ enum ufshcd_mcq_opr { * @auto_bkops_enabled: to track whether bkops is enabled in device * @vreg_info: UFS device voltage regulator information * @clk_list_head: UFS host controller clocks list node head + * @use_pm_opp: Indicates whether OPP based scaling is used or not * @req_abort_count: number of times ufshcd_abort() has been called * @lanes_per_direction: number of lanes per data direction between the UFS * controller and the UFS device. 
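A small kernel-side sketch of how the baseline bit above is meant to be interpreted; ufs_rtc_type is a hypothetical helper for illustration, and the real driver logic may differ:

#include <ufs/ufs.h>

/* wPeriodicRTCUpdate bit 9 selects the RTC time baseline. */
static enum ufs_rtc_time ufs_rtc_type(u16 periodic_rtc_update)
{
	return (periodic_rtc_update & UFS_RTC_TIME_BASELINE) ?
		UFS_RTC_ABSOLUTE : UFS_RTC_RELATIVE;
}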
@@ -911,6 +912,8 @@ enum ufshcd_mcq_opr { * @mcq_base: Multi circular queue registers base address * @uhq: array of supported hardware queues * @dev_cmd_queue: Queue for issuing device management commands + * @mcq_opr: MCQ operation and runtime registers + * @ufs_rtc_update_work: A work for UFS RTC periodic update */ struct ufs_hba { void __iomem *mmio_base; @@ -1015,6 +1018,7 @@ struct ufs_hba { bool auto_bkops_enabled; struct ufs_vreg_info vreg_info; struct list_head clk_list_head; + bool use_pm_opp; /* Number of requests aborts */ int req_abort_count; @@ -1057,6 +1061,10 @@ struct ufs_hba { struct delayed_work debugfs_ee_work; u32 debugfs_ee_rate_limit_ms; #endif +#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION + struct fault_attr trigger_eh_attr; + struct fault_attr timeout_attr; +#endif u32 luns_avail; unsigned int nr_hw_queues; unsigned int nr_queues[HCTX_MAX_TYPES]; @@ -1070,6 +1078,8 @@ struct ufs_hba { struct ufs_hw_queue *uhq; struct ufs_hw_queue *dev_cmd_queue; struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX]; + + struct delayed_work ufs_rtc_update_work; }; /** @@ -1230,6 +1240,8 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) ufshcd_writel(hba, tmp, reg); } +void ufshcd_enable_irq(struct ufs_hba *hba); +void ufshcd_disable_irq(struct ufs_hba *hba); int ufshcd_alloc_host(struct device *, struct ufs_hba **); void ufshcd_dealloc_host(struct ufs_hba *); int ufshcd_hba_enable(struct ufs_hba *hba); @@ -1253,6 +1265,9 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); void ufshcd_mcq_enable_esi(struct ufs_hba *hba); void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg); +int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table, + struct dev_pm_opp *opp, void *data, + bool scaling_down); /** * ufshcd_set_variant - set variant specific data to the hba * @hba: per adapter instance @@ -1356,7 +1371,6 @@ static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba) return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); } -void ufshcd_auto_hibern8_enable(struct ufs_hba *hba); void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, const struct ufs_dev_quirk *fixups); diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h index 256eb3a43f54..360e1245fb40 100644 --- a/include/ufs/unipro.h +++ b/include/ufs/unipro.h @@ -193,7 +193,7 @@ #define DME_LocalAFC0ReqTimeOutVal 0xD043 /* PA power modes */ -enum { +enum ufs_pa_pwr_mode { FAST_MODE = 1, SLOW_MODE = 2, FASTAUTO_MODE = 4, @@ -205,7 +205,7 @@ enum { #define PWRMODE_RX_OFFSET 4 /* PA TX/RX Frequency Series */ -enum { +enum ufs_hs_gear_rate { PA_HS_MODE_A = 1, PA_HS_MODE_B = 2, }; diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h new file mode 100644 index 000000000000..c50d152e7b3e --- /dev/null +++ b/include/vdso/gettime.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VDSO_GETTIME_H +#define _VDSO_GETTIME_H + +#include <linux/types.h> + +struct __kernel_timespec; +struct timezone; + +#if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) +struct old_timespec32; +int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res); +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts); +#else +int __vdso_clock_getres(clockid_t clock, struct __kernel_timespec *res); +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); +#endif + +__kernel_old_time_t __vdso_time(__kernel_old_time_t *t); +int __vdso_gettimeofday(struct 
__kernel_old_timeval *tv, struct timezone *tz); +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts); + +#endif diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h index 77252cb46361..a722dcbf5073 100644 --- a/include/video/mmp_disp.h +++ b/include/video/mmp_disp.h @@ -231,7 +231,7 @@ struct mmp_path { /* layers */ int overlay_num; - struct mmp_overlay overlays[]; + struct mmp_overlay overlays[] __counted_by(overlay_num); }; extern struct mmp_path *mmp_get_path(const char *name); diff --git a/include/video/sticore.h b/include/video/sticore.h index 945ad60463a1..823666af1871 100644 --- a/include/video/sticore.h +++ b/include/video/sticore.h @@ -2,7 +2,7 @@ #ifndef STICORE_H #define STICORE_H -struct fb_info; +struct device; /* generic STI structures & functions */ @@ -232,7 +232,7 @@ struct sti_rom_font { u8 height; u8 font_type; /* language type */ u8 bytes_per_char; - u32 next_font; + s32 next_font; /* note: signed int */ u8 underline_height; u8 underline_pos; u8 res008[2]; @@ -367,8 +367,8 @@ struct sti_struct { /* PCI data structures (pg. 17ff from sti.pdf) */ u8 rm_entry[16]; /* pci region mapper array == pci config space offset */ - /* pointer to the fb_info where this STI device is used */ - struct fb_info *info; + /* pointer to the parent device */ + struct device *dev; /* pointer to all internal data */ struct sti_all_data *sti_data; diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h index 8d2a3bfc8dac..47d96e75e8ef 100644 --- a/include/video/uvesafb.h +++ b/include/video/uvesafb.h @@ -109,8 +109,6 @@ struct uvesafb_ktask { u32 ack; }; -static int uvesafb_exec(struct uvesafb_ktask *tsk); - #define UVESAFB_EXACT_RES 1 #define UVESAFB_EXACT_DEPTH 2 diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h index 43ef24dd030e..9995695204f5 100644 --- a/include/xen/arm/hypervisor.h +++ b/include/xen/arm/hypervisor.h @@ -7,18 +7,6 @@ extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; -/* Lazy mode for batching updates / context switch */ -enum paravirt_lazy_mode { - PARAVIRT_LAZY_NONE, - PARAVIRT_LAZY_MMU, - PARAVIRT_LAZY_CPU, -}; - -static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) -{ - return PARAVIRT_LAZY_NONE; -} - #ifdef CONFIG_XEN void __init xen_early_init(void); #else diff --git a/include/xen/events.h b/include/xen/events.h index 95d5e28de324..3b07409f8032 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -88,7 +88,6 @@ void xen_irq_resume(void); /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); -void xen_set_irq_pending(int irq); bool xen_test_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the @@ -101,12 +100,11 @@ void xen_poll_irq_timeout(int irq, u64 timeout); /* Determine the IRQ which is bound to an event channel */ unsigned int irq_from_evtchn(evtchn_port_t evtchn); -int irq_from_virq(unsigned int cpu, unsigned int virq); -evtchn_port_t evtchn_from_irq(unsigned irq); +int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq, + evtchn_port_t *evtchn); int xen_set_callback_via(uint64_t via); -void xen_evtchn_do_upcall(struct pt_regs *regs); -int xen_hvm_evtchn_do_upcall(void); +int xen_evtchn_do_upcall(void); /* Bind a pirq for a physical interrupt to an irq. 
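The __counted_by() annotations added in this patch (xfrm's ctx_str earlier, mmp_disp's overlays here) tie a flexible array to its counter so CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS can bounds-check accesses; a sketch of the allocation pattern they expect, with mmp_path_alloc as an illustrative name:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <video/mmp_disp.h>

static struct mmp_path *mmp_path_alloc(int n)
{
	struct mmp_path *path = kzalloc(struct_size(path, overlays, n),
					GFP_KERNEL);

	if (path)
		path->overlay_num = n;	/* counter must match the allocation */
	return path;
}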
*/ int xen_bind_pirq_gsi_to_irq(unsigned gsi, @@ -123,9 +121,6 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, /* De-allocates the above mentioned physical interrupt. */ int xen_destroy_irq(int irq); -/* Return irq from pirq */ -int xen_irq_from_pirq(unsigned pirq); - /* Return the pirq allocated to the irq. */ int xen_pirq_from_irq(unsigned irq); diff --git a/include/xen/interface/hvm/ioreq.h b/include/xen/interface/hvm/ioreq.h new file mode 100644 index 000000000000..b02cfeae7eb5 --- /dev/null +++ b/include/xen/interface/hvm/ioreq.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: MIT */ +/* + * ioreq.h: I/O request definitions for device models + * Copyright (c) 2004, Intel Corporation. + */ + +#ifndef __XEN_PUBLIC_HVM_IOREQ_H__ +#define __XEN_PUBLIC_HVM_IOREQ_H__ + +#define IOREQ_READ 1 +#define IOREQ_WRITE 0 + +#define STATE_IOREQ_NONE 0 +#define STATE_IOREQ_READY 1 +#define STATE_IOREQ_INPROCESS 2 +#define STATE_IORESP_READY 3 + +#define IOREQ_TYPE_PIO 0 /* pio */ +#define IOREQ_TYPE_COPY 1 /* mmio ops */ +#define IOREQ_TYPE_PCI_CONFIG 2 +#define IOREQ_TYPE_TIMEOFFSET 7 +#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ + +/* + * VMExit dispatcher should cooperate with instruction decoder to + * prepare this structure and notify service OS and DM by sending + * virq. + * + * For I/O type IOREQ_TYPE_PCI_CONFIG, the physical address is formatted + * as follows: + * + * 63....48|47..40|39..35|34..32|31........0 + * SEGMENT |BUS |DEV |FN |OFFSET + */ +struct ioreq { + uint64_t addr; /* physical address */ + uint64_t data; /* data (or paddr of data) */ + uint32_t count; /* for rep prefixes */ + uint32_t size; /* size in bytes */ + uint32_t vp_eport; /* evtchn for notifications to/from device model */ + uint16_t _pad0; + uint8_t state:4; + uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr + * of the real data to use. */ + uint8_t dir:1; /* 1=read, 0=write */ + uint8_t df:1; + uint8_t _pad1:1; + uint8_t type; /* I/O type */ +}; + +#endif /* __XEN_PUBLIC_HVM_IOREQ_H__ */ diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h index 18417b017869..60e42d3b760e 100644 --- a/include/xen/interface/io/displif.h +++ b/include/xen/interface/io/displif.h @@ -537,7 +537,7 @@ struct xendispl_dbuf_create_req { struct xendispl_page_directory { grant_ref_t gref_dir_next_page; - grant_ref_t gref[1]; /* Variable length */ + grant_ref_t gref[]; }; /* diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index ba4c4274b714..4fef1efcdcab 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -95,7 +95,7 @@ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t __pad[48]; \ - union __name##_sring_entry ring[1]; /* variable-length */ \ + union __name##_sring_entry ring[]; \ }; \ \ /* "Front" end's private variables */ \ diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h index 445657cdb1de..b818517588b5 100644 --- a/include/xen/interface/io/sndif.h +++ b/include/xen/interface/io/sndif.h @@ -659,7 +659,7 @@ struct xensnd_open_req { struct xensnd_page_directory { grant_ref_t gref_dir_next_page; - grant_ref_t gref[1]; /* Variable length */ + grant_ref_t gref[]; }; /* |

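With gref[1] replaced by flexible arrays in these page-directory and ring structs, capacities are derived from offsets rather than from a fixed one-element tail. For instance, a frontend might compute the per-page gref capacity as below; the macro name is illustrative, and PAGE_SIZE/grant_ref_t come from the usual kernel and Xen headers:

#include <linux/stddef.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/sndif.h>

/* Illustrative: grant references that fit in one directory page. */
#define XENSND_GREFS_PER_PAGE \
	((PAGE_SIZE - offsetof(struct xensnd_page_directory, gref)) / \
	 sizeof(grant_ref_t))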