Diffstat (limited to 'include')
108 files changed, 1331 insertions, 688 deletions
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h index 6cdc873ac907..aa5233b1eba9 100644 --- a/include/clocksource/hyperv_timer.h +++ b/include/clocksource/hyperv_timer.h @@ -38,6 +38,8 @@ extern void hv_remap_tsc_clocksource(void); extern unsigned long hv_get_tsc_pfn(void); extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void); +extern void hv_adj_sched_clock_offset(u64 offset); + static __always_inline bool hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc, u64 *time) diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index f6a1cbb0f600..a80ba457a858 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -700,6 +700,13 @@ struct drm_dp_mst_topology_mgr { bool payload_id_table_cleared : 1; /** + * @reset_rx_state: The down request's reply and up request message + * receiver state must be reset after the topology manager is + * removed. Protected by @lock. + */ + bool reset_rx_state : 1; + + /** * @payload_count: The number of currently active payloads in hardware. This value is only * intended to be used internally by MST helpers for payload tracking, and is only safe to * read/write from the atomic commit (not check) context. diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h index efff77b58dd6..7c34c8ffcf3b 100644 --- a/include/kunit/visibility.h +++ b/include/kunit/visibility.h @@ -20,12 +20,11 @@ /** * EXPORT_SYMBOL_IF_KUNIT(symbol) - Exports symbol into * EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is - * enabled. Must use MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING) + * enabled. Must use MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING") * in test file in order to use symbols.
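 * Example (editor's sketch, not part of the patch; frob_calibrate is an
 * invented symbol name): in the module under test,
 *
 *	VISIBLE_IF_KUNIT int frob_calibrate(int raw);
 *	EXPORT_SYMBOL_IF_KUNIT(frob_calibrate);
 *
 * and in the KUnit test module that calls it,
 *
 *	MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
 *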
* @symbol: the symbol identifier to export */ - #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, \ - EXPORTED_FOR_KUNIT_TESTING) + #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, "EXPORTED_FOR_KUNIT_TESTING") #else #define VISIBLE_IF_KUNIT static #define EXPORT_SYMBOL_IF_KUNIT(symbol) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index e61dd7dd2286..147bd3ee4f7b 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -53,8 +53,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1); void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); -void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val); -void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu); bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu); @@ -127,8 +126,7 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu) static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} -static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} -static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {} static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 05f39fbfa485..6adcd1b92b20 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -40,7 +40,7 @@ struct irq_domain_ops; #include <asm/acpi.h> #ifdef CONFIG_ACPI_TABLE_LIB -#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI) +#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "ACPI") #define __init_or_acpilib #define __initdata_or_acpilib #else diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 7c0786bdf9af..0bbbe537c5f9 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -63,7 +63,12 @@ static inline void set_codetag_empty(union codetag_ref *ref) #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ static inline bool is_codetag_empty(union codetag_ref *ref) { return false; } -static inline void set_codetag_empty(union codetag_ref *ref) {} + +static inline void set_codetag_empty(union codetag_ref *ref) +{ + if (ref) + ref->ct = NULL; +} #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ @@ -135,7 +140,7 @@ static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag) #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) { - WARN_ONCE(ref && ref->ct, + WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref), "alloc_tag was not cleared (got tag for %s:%u)\n", ref->ct->filename, ref->ct->lineno); diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index a28e2a6a13d0..74169dd0f659 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -166,9 +166,12 @@ static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev) return dev_get_drvdata(&fdev->dev); } +struct ffa_partition_info; + #if 
IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT) -struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, - const struct ffa_ops *ops); +struct ffa_device * +ffa_device_register(const struct ffa_partition_info *part_info, + const struct ffa_ops *ops); void ffa_device_unregister(struct ffa_device *ffa_dev); int ffa_driver_register(struct ffa_driver *driver, struct module *owner, const char *mod_name); @@ -176,9 +179,9 @@ void ffa_driver_unregister(struct ffa_driver *driver); bool ffa_device_is_valid(struct ffa_device *ffa_dev); #else -static inline -struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, - const struct ffa_ops *ops) +static inline struct ffa_device * +ffa_device_register(const struct ffa_partition_info *part_info, + const struct ffa_ops *ops) { return NULL; } diff --git a/include/linux/bio.h b/include/linux/bio.h index 60830a6a5939..7a1b3b1a8fed 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -423,7 +423,7 @@ void __bio_add_page(struct bio *bio, struct page *page, void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, size_t off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); -void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter); +void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter); void __bio_release_pages(struct bio *bio, bool mark_dirty); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 08a727b40816..378d3a1a22fc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -200,8 +200,6 @@ struct gendisk { spinlock_t zone_wplugs_lock; struct mempool_s *zone_wplugs_pool; struct hlist_head *zone_wplugs_hash; - struct list_head zone_wplugs_err_list; - struct work_struct zone_wplugs_work; struct workqueue_struct *zone_wplugs_wq; #endif /* CONFIG_BLK_DEV_ZONED */ @@ -1421,6 +1419,9 @@ static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector) return is_seq; } +int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask); + static inline unsigned int queue_dma_alignment(const struct request_queue *q) { return q->limits.dma_alignment; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index eaee2a819f4c..6e63dd3443b9 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1527,6 +1527,7 @@ struct bpf_prog_aux { bool is_extended; /* true if extended by freplace program */ bool jits_use_priv_stack; bool priv_stack_requested; + bool changes_pkt_data; u64 prog_array_member_cnt; /* counts how many times as member of prog_array */ struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */ struct bpf_arena *arena; @@ -2193,26 +2194,25 @@ bpf_prog_run_array(const struct bpf_prog_array *array, * rcu-protected dynamically sized maps. 
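 *
 * Editor's illustration (not part of the patch): the trace-RCU lock and the
 * array dereference move out to the caller, which under the new convention
 * does roughly the following (ev->prog_array stands in for whatever
 * RCU-protected pointer the caller owns):
 *
 *	rcu_read_lock_trace();
 *	ret = bpf_prog_run_array_uprobe(
 *		rcu_dereference_check(ev->prog_array,
 *				      rcu_read_lock_trace_held()),
 *		ctx, run_prog);
 *	rcu_read_unlock_trace();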
*/ static __always_inline u32 -bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu, +bpf_prog_run_array_uprobe(const struct bpf_prog_array *array, const void *ctx, bpf_prog_run_fn run_prog) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; - const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_trace_run_ctx run_ctx; u32 ret = 1; might_fault(); + RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held"); + + if (unlikely(!array)) + return ret; - rcu_read_lock_trace(); migrate_disable(); run_ctx.is_uprobe = true; - array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held()); - if (unlikely(!array)) - goto out; old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); item = &array->items[0]; while ((prog = READ_ONCE(item->prog))) { @@ -2227,9 +2227,7 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu, rcu_read_unlock(); } bpf_reset_run_ctx(old_run_ctx); -out: migrate_enable(); - rcu_read_unlock_trace(); return ret; } @@ -3516,10 +3514,4 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog) return prog->aux->func_idx != 0; } -static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog) -{ - return prog->type == BPF_PROG_TYPE_TRACING && - prog->expected_attach_type == BPF_TRACE_RAW_TP; -} - #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index f4290c179bee..48b7b2eeb7e2 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -659,6 +659,7 @@ struct bpf_subprog_info { bool args_cached: 1; /* true if bpf_fastcall stack region is used by functions that can't be inlined */ bool keep_fastcall_stack: 1; + bool changes_pkt_data: 1; enum priv_stack_mode priv_stack_mode; u8 arg_cnt; diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h index 18e0a2fc3816..5178b72bc920 100644 --- a/include/linux/bus/stm32_firewall_device.h +++ b/include/linux/bus/stm32_firewall_device.h @@ -115,7 +115,7 @@ void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 su #else /* CONFIG_STM32_FIREWALL */ int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall, - unsigned int nb_firewall); + unsigned int nb_firewall) { return -ENODEV; } diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 108060612bb8..7ad736538649 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -155,8 +155,14 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level) #ifndef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING #define cpu_dcache_is_aliasing() false +#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing() #else #include <asm/cachetype.h> + +#ifndef cpu_icache_is_aliasing +#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing() +#endif + #endif #endif /* _LINUX_CACHEINFO_H */ diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index 966fcc5ff8ef..ec00e3f7af2b 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -273,12 +273,6 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ * an anonymous instance of the (guard) class, not recommended for * conditional locks. * - * if_not_guard(name, args...) { <error handling> }: - * convenience macro for conditional guards that calls the statement that - * follows only if the lock was not acquired (typically an error return). - * - * Only for conditional locks. - * * scoped_guard (name, args...) 
{ }: * similar to CLASS(name, scope)(args), except the variable (with the * explicit name 'scope') is declared in a for-loop such that its scope is @@ -350,14 +344,6 @@ _label: \ #define scoped_cond_guard(_name, _fail, args...) \ __scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args) -#define __if_not_guard(_name, _id, args...) \ - BUILD_BUG_ON(!__is_cond_ptr(_name)); \ - CLASS(_name, _id)(args); \ - if (!__guard_ptr(_name)(&_id)) - -#define if_not_guard(_name, args...) \ - __if_not_guard(_name, __UNIQUE_ID(guard), args) - /* * Additional helper macros for generating lock guards with types, either for * locks that don't have a native type (eg. RCU, preempt) or those that need a diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index ef1b16da6ad5..65b7c41471c3 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -49,6 +49,7 @@ struct module; * @archdata: Optional arch-specific data * @max_cycles: Maximum safe cycle value which won't overflow on * multiplication + * @max_raw_delta: Maximum safe delta value for negative motion detection * @name: Pointer to clocksource name * @list: List head for registration (internal) * @freq_khz: Clocksource frequency in khz. @@ -109,6 +110,7 @@ struct clocksource { struct arch_clocksource_data archdata; #endif u64 max_cycles; + u64 max_raw_delta; const char *name; struct list_head list; u32 freq_khz; diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 469a64dd6495..240c632c5b95 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -216,28 +216,43 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #endif /* __KERNEL__ */ +/** + * offset_to_ptr - convert a relative memory offset to an absolute pointer + * @off: the address of the 32-bit offset value + */ +static inline void *offset_to_ptr(const int *off) +{ + return (void *)((unsigned long)off + *off); +} + +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_64BIT +#define ARCH_SEL(a,b) a +#else +#define ARCH_SEL(a,b) b +#endif + /* * Force the compiler to emit 'sym' as a symbol, so that we can reference * it from inline assembler. Necessary in case 'sym' could be inlined * otherwise, or eliminated entirely due to lack of references that are * visible to the compiler.
*/ -#define ___ADDRESSABLE(sym, __attrs) \ - static void * __used __attrs \ +#define ___ADDRESSABLE(sym, __attrs) \ + static void * __used __attrs \ __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym; + #define __ADDRESSABLE(sym) \ ___ADDRESSABLE(sym, __section(".discard.addressable")) -/** - * offset_to_ptr - convert a relative memory offset to an absolute pointer - * @off: the address of the 32-bit offset value - */ -static inline void *offset_to_ptr(const int *off) -{ - return (void *)((unsigned long)off + *off); -} +#define __ADDRESSABLE_ASM(sym) \ + .pushsection .discard.addressable,"aw"; \ + .align ARCH_SEL(8,4); \ + ARCH_SEL(.quad, .long) __stringify(sym); \ + .popsection; -#endif /* __ASSEMBLY__ */ +#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym)) #ifdef __CHECKER__ #define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0) diff --git a/include/linux/cred.h b/include/linux/cred.h index e4a3155fe409..0c3c4b16b469 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -155,8 +155,6 @@ extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); extern int commit_creds(struct cred *); extern void abort_creds(struct cred *); -extern const struct cred *override_creds(const struct cred *); -extern void revert_creds(const struct cred *); extern struct cred *prepare_kernel_cred(struct task_struct *); extern int set_security_override(struct cred *, u32); extern int set_security_override_from_ctx(struct cred *, const char *); @@ -172,12 +170,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) cred->cap_inheritable)); } -/* - * Override creds without bumping reference count. Caller must ensure - * reference remains valid or has taken reference. Almost always not the - * interface you want. Use override_creds()/revert_creds() instead. - */ -static inline const struct cred *override_creds_light(const struct cred *override_cred) +static inline const struct cred *override_creds(const struct cred *override_cred) { const struct cred *old = current->cred; @@ -185,35 +178,12 @@ static inline const struct cred *override_creds_light(const struct cred *overrid return old; } -static inline void revert_creds_light(const struct cred *revert_cred) -{ - rcu_assign_pointer(current->cred, revert_cred); -} - -/** - * get_new_cred_many - Get references on a new set of credentials - * @cred: The new credentials to reference - * @nr: Number of references to acquire - * - * Get references on the specified set of new credentials. The caller must - * release all acquired references. - */ -static inline struct cred *get_new_cred_many(struct cred *cred, int nr) +static inline const struct cred *revert_creds(const struct cred *revert_cred) { - atomic_long_add(nr, &cred->usage); - return cred; -} + const struct cred *override_cred = current->cred; -/** - * get_new_cred - Get a reference on a new set of credentials - * @cred: The new credentials to reference - * - * Get a reference on the specified set of new credentials. The caller must - * release the reference. 
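 *
 * [Editor's note, not part of the patch: with override_creds() and
 * revert_creds() now inlined above, revert_creds() returns the set of
 * credentials that was in force before the revert, so the usual pairing
 * stays:
 *
 *	const struct cred *old = override_creds(new_cred);
 *	// ... do work with elevated credentials ...
 *	revert_creds(old);
 * ]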
- */ -static inline struct cred *get_new_cred(struct cred *cred) -{ - return get_new_cred_many(cred, 1); + rcu_assign_pointer(current->cred, revert_cred); + return override_cred; } /** @@ -236,7 +206,8 @@ static inline const struct cred *get_cred_many(const struct cred *cred, int nr) if (!cred) return cred; nonconst_cred->non_rcu = 0; - return get_new_cred_many(nonconst_cred, nr); + atomic_long_add(nr, &nonconst_cred->usage); + return cred; } /* diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b137fdb56093..346251bf1026 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -84,7 +84,7 @@ enum dma_transfer_direction { DMA_TRANS_NONE, }; -/** +/* * Interleaved Transfer Request * ---------------------------- * A chunk is a collection of contiguous bytes to be transferred. @@ -223,7 +223,7 @@ enum sum_check_bits { }; /** - * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations + * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise */ @@ -286,7 +286,7 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; * pointer to the engine's metadata area * 4. Read out the metadata from the pointer * - * Note: the two mode is not compatible and clients must use one mode for a + * Warning: the two modes are not compatible and clients must use one mode for a * descriptor. */ enum dma_desc_metadata_mode { @@ -594,9 +594,13 @@ struct dma_descriptor_metadata_ops { * @phys: physical address of the descriptor * @chan: target channel for this operation * @tx_submit: accept the descriptor, assign ordered cookie and mark the + * @desc_free: driver's callback function to free a reusable descriptor + * after completion * descriptor pending. To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete + * @callback_result: error result from a DMA transaction * @callback_param: general parameter to pass to the callback routine + * @unmap: hook for generic DMA unmap data * @desc_metadata_mode: core managed metadata mode to protect mixed use of * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise * DESC_METADATA_NONE @@ -827,6 +831,9 @@ struct dma_filter { * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer, + * where the address and size of each segment is located in one entry of + * the dma_vec array. * @device_prep_slave_sg: prepares a slave dma operation * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. * The function takes a buffer of size buf_len.
The callback function will diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h index 6fbfbde68a37..620a3260fc08 100644 --- a/include/linux/dsa/ocelot.h +++ b/include/linux/dsa/ocelot.h @@ -15,6 +15,7 @@ struct ocelot_skb_cb { struct sk_buff *clone; unsigned int ptp_class; /* valid only for clones */ + unsigned long ptp_tx_time; /* valid only for clones */ u32 tstamp_lo; u8 ptp_cmd; u8 ts_id; diff --git a/include/linux/export.h b/include/linux/export.h index 0bbd02fd351d..2633df4d31e6 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -60,14 +60,14 @@ #endif #ifdef DEFAULT_SYMBOL_NAMESPACE -#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE)) +#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, DEFAULT_SYMBOL_NAMESPACE) #else #define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "") #endif #define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "") #define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "GPL") -#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns)) -#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns)) +#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", ns) +#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", ns) #endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 4cc8801e50e3..a087606ace19 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -3,6 +3,7 @@ #define LINUX_EXPORTFS_H 1 #include <linux/types.h> +#include <linux/path.h> struct dentry; struct iattr; @@ -156,6 +157,17 @@ struct fid { }; }; +enum handle_to_path_flags { + HANDLE_CHECK_PERMS = (1 << 0), + HANDLE_CHECK_SUBTREE = (1 << 1), +}; + +struct handle_to_path_ctx { + struct path root; + enum handle_to_path_flags flags; + unsigned int fh_flags; +}; + #define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */ #define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */ #define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */ @@ -225,6 +237,12 @@ struct fid { * is also a directory. In the event that it cannot be found, or storage * space cannot be allocated, a %ERR_PTR should be returned. * + * permission: + * Allow filesystems to specify a custom permission function. + * + * open: + * Allow filesystems to specify a custom open function. + * * commit_metadata: * @commit_metadata should commit metadata changes to stable storage. 
* @@ -251,6 +269,8 @@ struct export_operations { bool write, u32 *device_generation); int (*commit_blocks)(struct inode *inode, struct iomap *iomaps, int nr_iomaps, struct iattr *iattr); + int (*permission)(struct handle_to_path_ctx *ctx, unsigned int oflags); + struct file * (*open)(struct path *path, unsigned int oflags); #define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */ #define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */ #define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */ diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h index c50882f19235..966092ffa89a 100644 --- a/include/linux/fiemap.h +++ b/include/linux/fiemap.h @@ -5,12 +5,18 @@ #include <uapi/linux/fiemap.h> #include <linux/fs.h> +/** + * struct fiemap_extent_info - fiemap request to a filesystem + * @fi_flags: Flags as passed from user + * @fi_extents_mapped: Number of mapped extents + * @fi_extents_max: Size of fiemap_extent array + * @fi_extents_start: Start of fiemap_extent array + */ struct fiemap_extent_info { - unsigned int fi_flags; /* Flags as passed from user */ - unsigned int fi_extents_mapped; /* Number of mapped extents */ - unsigned int fi_extents_max; /* Size of fiemap_extent array */ - struct fiemap_extent __user *fi_extents_start; /* Start of - fiemap_extent array */ + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent __user *fi_extents_start; }; int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, diff --git a/include/linux/filter.h b/include/linux/filter.h index 3a21947f2fd4..0477254bc2d3 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1122,7 +1122,7 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena); bool bpf_jit_supports_private_stack(void); u64 bpf_arch_uaddress_limit(void); void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); -bool bpf_helper_changes_pkt_data(void *func); +bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id); static inline bool bpf_dump_raw_ok(const struct cred *cred) { diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h index 3abe614ef5f0..4d3f8074c137 100644 --- a/include/linux/folio_queue.h +++ b/include/linux/folio_queue.h @@ -37,16 +37,20 @@ struct folio_queue { #if PAGEVEC_SIZE > BITS_PER_LONG #error marks is not big enough #endif + unsigned int rreq_id; + unsigned int debug_id; }; /** * folioq_init - Initialise a folio queue segment * @folioq: The segment to initialise + * @rreq_id: The request identifier to use in tracelines. * - * Initialise a folio queue segment. Note that the folio pointers are - * left uninitialised. + * Initialise a folio queue segment and set an identifier to be used in traces. + * + * Note that the folio pointers are left uninitialised. 
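 *
 * Editor's sketch (not part of the patch; the kmalloc() and the use of a
 * request's debug_id as the identifier are illustrative):
 *
 *	struct folio_queue *fq;
 *
 *	fq = kmalloc(sizeof(*fq), GFP_KERNEL);
 *	if (fq)
 *		folioq_init(fq, rreq->debug_id);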
*/ -static inline void folioq_init(struct folio_queue *folioq) +static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id) { folio_batch_init(&folioq->vec); folioq->next = NULL; @@ -54,6 +58,8 @@ static inline void folioq_init(struct folio_queue *folioq) folioq->marks = 0; folioq->marks2 = 0; folioq->marks3 = 0; + folioq->rreq_id = rreq_id; + folioq->debug_id = 0; } /** diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 0d99bf11d260..e4ce1cae03bf 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -616,6 +616,12 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, return false; } +/* + * To work around what seems to be an optimizer bug, the macro arguments + * need to have const copies or the values end up changed by the time they + * reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture + * __bos() results in const temp vars") for more details. + */ #define __fortify_memcpy_chk(p, q, size, p_size, q_size, \ p_size_field, q_size_field, op) ({ \ const size_t __fortify_size = (size_t)(size); \ @@ -623,6 +629,8 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, const size_t __q_size = (q_size); \ const size_t __p_size_field = (p_size_field); \ const size_t __q_size_field = (q_size_field); \ + /* Keep a mutable version of the size for the final copy. */ \ + size_t __copy_size = __fortify_size; \ fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \ __q_size, __p_size_field, \ __q_size_field, FORTIFY_FUNC_ ##op), \ @@ -630,7 +638,11 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, __fortify_size, \ "field \"" #p "\" at " FILE_LINE, \ __p_size_field); \ - __underlying_##op(p, q, __fortify_size); \ + /* Hide only the run-time size from value range tracking to */ \ + /* silence compile-time false positive bounds warnings. 
*/ \ if (!__builtin_constant_p(__copy_size)) \ OPTIMIZER_HIDE_VAR(__copy_size); \ __underlying_##op(p, q, __copy_size); \ }) /* diff --git a/include/linux/fs.h b/include/linux/fs.h index f7efc6866ebc..2b8447f53200 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -322,6 +322,7 @@ struct readahead_control; #define IOCB_NOWAIT (__force int) RWF_NOWAIT #define IOCB_APPEND (__force int) RWF_APPEND #define IOCB_ATOMIC (__force int) RWF_ATOMIC +#define IOCB_DONTCACHE (__force int) RWF_DONTCACHE /* non-RWF related bits - start at 16 */ #define IOCB_EVENTFD (1 << 16) @@ -356,7 +357,8 @@ struct readahead_control; { IOCB_SYNC, "SYNC" }, \ { IOCB_NOWAIT, "NOWAIT" }, \ { IOCB_APPEND, "APPEND" }, \ - { IOCB_ATOMIC, "ATOMIC"}, \ + { IOCB_ATOMIC, "ATOMIC" }, \ + { IOCB_DONTCACHE, "DONTCACHE" }, \ { IOCB_EVENTFD, "EVENTFD"}, \ { IOCB_DIRECT, "DIRECT" }, \ { IOCB_WRITE, "WRITE" }, \ @@ -626,6 +628,7 @@ is_uncached_acl(struct posix_acl *acl) #define IOP_XATTR 0x0008 #define IOP_DEFAULT_READLINK 0x0010 #define IOP_MGTIME 0x0020 +#define IOP_CACHED_LINK 0x0040 /* * Keep mostly read-only and often accessed (especially for @@ -723,7 +726,10 @@ struct inode { }; struct file_lock_context *i_flctx; struct address_space i_data; - struct list_head i_devices; + union { + struct list_head i_devices; + int i_linklen; + }; union { struct pipe_inode_info *i_pipe; struct cdev *i_cdev; @@ -749,6 +755,13 @@ struct inode { void *i_private; /* fs or device private pointer */ } __randomize_layout; +static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen) +{ + inode->i_link = link; + inode->i_linklen = linklen; + inode->i_opflags |= IOP_CACHED_LINK; +} + /* * Get bit address from inode->i_state to use with wait_var_event() * infrastructure. @@ -2127,6 +2140,8 @@ struct file_operations { #define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5)) /* Supports asynchronous lock callbacks */ #define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6)) +/* File system supports uncached read/write buffered IO */ +#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7)) /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, @@ -3351,7 +3366,7 @@ extern const struct file_operations generic_ro_fops; #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) -extern int readlink_copy(char __user *, int, const char *); +extern int readlink_copy(char __user *, int, const char *, int); extern int page_readlink(struct dentry *, char __user *, int); extern const char *page_get_link(struct dentry *, struct inode *, struct delayed_call *); @@ -3613,6 +3628,14 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags, if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE)) return -EOPNOTSUPP; } + if (flags & RWF_DONTCACHE) { + /* file system must support it */ + if (!(ki->ki_filp->f_op->fop_flags & FOP_DONTCACHE)) + return -EOPNOTSUPP; + /* DAX mappings not supported */ + if (IS_DAX(ki->ki_filp->f_mapping->host)) + return -EOPNOTSUPP; + } kiocb_flags |= (__force int) (flags & RWF_SUPPORTED); if (flags & RWF_SYNC) kiocb_flags |= IOCB_DSYNC; diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h index 3ff4c277296f..9bd605b87c4c 100644 --- a/include/linux/fw_table.h +++ b/include/linux/fw_table.h @@ -54,7 +54,7 @@ int cdat_table_parse(enum acpi_cdat_type type, #define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x) #define __init_or_fwtbl_lib __init_or_acpilib #else -#define 
EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL) +#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "CXL") #define __init_or_fwtbl_lib #endif diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index a6876ab29004..a2e47dbcf82c 100644 --- a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -78,7 +78,7 @@ struct hid_ops { const struct bus_type *bus_type; }; -extern struct hid_ops *hid_ops; +extern const struct hid_ops *hid_ops; /** * struct hid_bpf_ops - A BPF struct_ops of callbacks allowing to attach HID-BPF diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 6e452bd8e7e3..5c6bea81a90e 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -224,7 +224,13 @@ static inline struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, unsigned long vaddr) { - return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr); + struct folio *folio; + + folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); + if (folio && user_alloc_needs_zeroing()) + clear_user_highpage(&folio->page, vaddr); + + return folio; } #endif diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 7ef5f7ef31a9..f7bfdcf0dda3 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -386,6 +386,7 @@ extern void __init hrtimers_init(void); extern void sysrq_timer_list_show(void); int hrtimers_prepare_cpu(unsigned int cpu); +int hrtimers_cpu_starting(unsigned int cpu); #ifdef CONFIG_HOTPLUG_CPU int hrtimers_cpu_dying(unsigned int cpu); #else diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 22c22fb91042..02a226bcf0ed 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1559,6 +1559,7 @@ struct hv_util_service { void *channel; void (*util_cb)(void *); int (*util_init)(struct hv_util_service *); + int (*util_init_transport)(void); void (*util_deinit)(void); int (*util_pre_suspend)(void); int (*util_pre_resume)(void); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index c1645c86eed9..d65b5d71b93b 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -585,13 +585,16 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query * @type: first vlan protocol + * @mac_offset: MAC offset * @depth: buffer to store length of eth and vlan tags in bytes * * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, - int *depth) +static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb, + __be16 type, + int mac_offset, + int *depth) { unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; @@ -610,7 +613,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, do { struct vlan_hdr vhdr, *vh; - vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); + vh = skb_header_pointer(skb, mac_offset + vlan_depth, + sizeof(vhdr), &vhdr); if (unlikely(!vh || !--parse_depth)) return 0; @@ -625,6 +629,12 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, return type; } +static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, + int *depth) +{ + return __vlan_get_protocol_offset(skb, type, 0, depth); +} + /** * vlan_get_protocol - get protocol EtherType. 
* @skb: skbuff to query diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index e123d5e17b52..85fe4e6b275c 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -15,10 +15,8 @@ bool io_is_uring_fops(struct file *file); static inline void io_uring_files_cancel(void) { - if (current->io_uring) { - io_uring_unreg_ringfd(); + if (current->io_uring) __io_uring_cancel(false); - } } static inline void io_uring_task_cancel(void) { diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index 578a3fdf5c71..a3ce553413de 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -18,6 +18,11 @@ struct io_uring_cmd { u8 pdu[32]; /* available inline for free use */ }; +struct io_uring_cmd_data { + struct io_uring_sqe sqes[2]; + void *op_data; +}; + static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) { return sqe->cmd; @@ -43,7 +48,7 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, * Note: the caller should never hard code @issue_flags and is only allowed * to pass the mask provided by the core io_uring code. */ -void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2, +void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2, unsigned issue_flags); void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, @@ -67,7 +72,7 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, return -EOPNOTSUPP; } static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, - ssize_t ret2, unsigned issue_flags) + u64 ret2, unsigned issue_flags) { } static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, @@ -113,4 +118,9 @@ static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd return cmd_to_io_kiocb(cmd)->tctx->task; } +static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd) +{ + return cmd_to_io_kiocb(cmd)->async_data; +} + #endif /* _LINUX_IO_URING_CMD_H */ diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 011860ade268..fd4cdb0860a2 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -345,7 +345,7 @@ struct io_ring_ctx { /* timeouts */ struct { - spinlock_t timeout_lock; + raw_spinlock_t timeout_lock; struct list_head timeout_list; struct list_head ltimeout_list; unsigned cq_last_tm_flush; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 5675af6b740c..75bf54e76f3b 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -335,7 +335,7 @@ struct iomap_ioend { u16 io_type; u16 io_flags; /* IOMAP_F_* */ struct inode *io_inode; /* file being written to */ - size_t io_size; /* size of the extent */ + size_t io_size; /* size of data within eof */ loff_t io_offset; /* offset in the file */ sector_t io_sector; /* start sector of ioend */ struct bio io_bio; /* MUST BE LAST! 
*/ diff --git a/include/linux/lockref.h b/include/linux/lockref.h index c3a1f78bc884..c39f119659ba 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -34,14 +34,24 @@ struct lockref { }; }; -extern void lockref_get(struct lockref *); -extern int lockref_put_return(struct lockref *); -extern int lockref_get_not_zero(struct lockref *); -extern int lockref_put_not_zero(struct lockref *); -extern int lockref_put_or_lock(struct lockref *); - -extern void lockref_mark_dead(struct lockref *); -extern int lockref_get_not_dead(struct lockref *); +/** + * lockref_init - Initialize a lockref + * @lockref: pointer to lockref structure + * @count: initial count + */ +static inline void lockref_init(struct lockref *lockref, unsigned int count) +{ + spin_lock_init(&lockref->lock); + lockref->count = count; +} + +void lockref_get(struct lockref *lockref); +int lockref_put_return(struct lockref *lockref); +bool lockref_get_not_zero(struct lockref *lockref); +bool lockref_put_or_lock(struct lockref *lockref); + +void lockref_mark_dead(struct lockref *lockref); +bool lockref_get_not_dead(struct lockref *lockref); /* Must be called under spinlock for reliable results */ static inline bool __lockref_is_dead(const struct lockref *l) diff --git a/include/linux/memfd.h b/include/linux/memfd.h index 3f2cf339ceaf..d437e3070850 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -7,6 +7,7 @@ #ifdef CONFIG_MEMFD_CREATE extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg); struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx); +unsigned int *memfd_file_seals_ptr(struct file *file); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { @@ -16,6 +17,19 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) { return ERR_PTR(-EINVAL); } + +static inline unsigned int *memfd_file_seals_ptr(struct file *file) +{ + return NULL; +} #endif +/* Retrieve memfd seals associated with the file, if any. */ +static inline unsigned int memfd_file_seals(struct file *file) +{ + unsigned int *sealsp = memfd_file_seals_ptr(file); + + return sealsp ? *sealsp : 0; +} + #endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h index 8db52324f416..eae82f421414 100644 --- a/include/linux/mfd/da9063/core.h +++ b/include/linux/mfd/da9063/core.h @@ -78,6 +78,7 @@ struct da9063 { enum da9063_type type; unsigned char variant_code; unsigned int flags; + bool use_sw_pm; /* Control interface */ struct regmap *regmap; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index fc7e6153b73d..ea48eb879a0f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -524,6 +524,7 @@ enum { * creation/deletion on drivers rescan. Unset during device attach. 
*/ MLX5_PRIV_FLAGS_DETACH = 1 << 2, + MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3, }; struct mlx5_adev { @@ -1202,6 +1203,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) return dev->coredev_type == MLX5_COREDEV_VF; } +static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1, + const struct mlx5_core_dev *dev2) +{ + return dev1->coredev_type == dev2->coredev_type; +} + static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) { return dev->caps.embedded_cpu; } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 4fbbcf35498b..48d47181c7cd 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -2119,7 +2119,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 migration_in_chunks[0x1]; u8 reserved_at_d1[0x1]; u8 sf_eq_usage[0x1]; - u8 reserved_at_d3[0xd]; + u8 reserved_at_d3[0x5]; + u8 multiplane[0x1]; + u8 reserved_at_d9[0x7]; u8 cross_vhca_object_to_object_supported[0x20]; diff --git a/include/linux/mm.h b/include/linux/mm.h index c39c4945946c..b1c3db9cf355 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -31,6 +31,7 @@ #include <linux/kasan.h> #include <linux/memremap.h> #include <linux/slab.h> +#include <linux/cacheinfo.h> struct mempolicy; struct anon_vma; @@ -3010,7 +3011,15 @@ static inline void pagetable_pte_dtor(struct ptdesc *ptdesc) lruvec_stat_sub_folio(folio, NR_PAGETABLE); } -pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); +pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); +static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, + pmd_t *pmdvalp) +{ + pte_t *pte; + + __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp)); + return pte; +} static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr) { return __pte_offset_map(pmd, addr, NULL); @@ -3023,7 +3032,8 @@ static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, { pte_t *pte; - __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)); + __cond_lock(RCU, __cond_lock(*ptlp, + pte = __pte_offset_map_lock(mm, pmd, addr, ptlp))); return pte; } @@ -3115,6 +3125,7 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) if (!pmd_ptlock_init(ptdesc)) return false; __folio_set_pgtable(folio); + ptdesc_pmd_pts_init(ptdesc); lruvec_stat_add_folio(folio, NR_PAGETABLE); return true; } @@ -4091,6 +4102,37 @@ void mem_dump_obj(void *object); static inline void mem_dump_obj(void *object) {} #endif +static inline bool is_write_sealed(int seals) +{ + return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE); +} + +/** + * is_readonly_sealed - Checks whether write-sealed but mapped read-only, + * in which case writes should be disallowed + * going forward. + * @seals: the seals to check + * @vm_flags: the VMA flags to check + * + * Returns whether readonly sealed, in which case writes should be disallowed + * going forward. + */ +static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags) +{ + /* + * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as + * MAP_SHARED and read-only, take care to not allow mprotect to + * revert protections on such mappings. Do this only for shared + * mappings. For private mappings, don't need to mask + * VM_MAYWRITE as we still want them to be COW-writable.
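+ *
+ * [Editor's example, not part of the patch: a caller such as an mmap()
+ * handler might use is_readonly_sealed() like
+ *
+ *	if (is_readonly_sealed(seals, vma->vm_flags))
+ *		vm_flags_clear(vma, VM_MAYWRITE);
+ *
+ * to stop a later mprotect() from re-enabling writes.]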
+ */ + if (is_write_sealed(seals) && + ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED)) + return true; + + return false; +} + /** * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and * handle them. @@ -4102,24 +4144,15 @@ static inline void mem_dump_obj(void *object) {} */ static inline int seal_check_write(int seals, struct vm_area_struct *vma) { - if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { - /* - * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * write seals are active. - */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return -EPERM; - - /* - * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as - * MAP_SHARED and read-only, take care to not allow mprotect to - * revert protections on such mappings. Do this only for shared - * mappings. For private mappings, don't need to mask - * VM_MAYWRITE as we still want them to be COW-writable. - */ - if (vma->vm_flags & VM_SHARED) - vm_flags_clear(vma, VM_MAYWRITE); - } + if (!is_write_sealed(seals)) + return 0; + + /* + * New PROT_WRITE and MAP_SHARED mmaps are not allowed when + * write seals are active. + */ + if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) + return -EPERM; return 0; } @@ -4175,6 +4208,23 @@ static inline int do_mseal(unsigned long start, size_t len_in, unsigned long fla } #endif +/* + * user_alloc_needs_zeroing() checks whether a user folio from the page + * allocator needs to be zeroed. + */ +static inline bool user_alloc_needs_zeroing(void) +{ + /* + * For user folios, an arch with cache aliasing requires a cache flush, + * and ARC changes folio->flags to make the icache coherent with the + * dcache, so always return false to make the caller use + * clear_user_page()/clear_user_highpage(). + */ + return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() || + !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, + &init_on_alloc); +} + int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status); int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status); int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 7361a8f3ab68..332cee285662 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -445,6 +445,7 @@ FOLIO_MATCH(compound_head, _head_2a); * @pt_index: Used for s390 gmap. * @pt_mm: Used for x86 pgds. * @pt_frag_refcount: For fragmented page table tracking. Powerpc only. + * @pt_share_count: Used for HugeTLB PMD page table share count. * @_pt_pad_2: Padding to ensure proper alignment. * @ptl: Lock for the page table. * @__page_type: Same as page->page_type. Unused for page tables.
@@ -471,6 +472,9 @@ struct ptdesc { pgoff_t pt_index; struct mm_struct *pt_mm; atomic_t pt_frag_refcount; +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING + atomic_t pt_share_count; +#endif }; union { @@ -516,6 +520,32 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page)); const struct page *: (const struct ptdesc *)(p), \ struct page *: (struct ptdesc *)(p))) +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING +static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc) +{ + atomic_set(&ptdesc->pt_share_count, 0); +} + +static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc) +{ + atomic_inc(&ptdesc->pt_share_count); +} + +static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc) +{ + atomic_dec(&ptdesc->pt_share_count); +} + +static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc) +{ + return atomic_read(&ptdesc->pt_share_count); +} +#else +static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc) +{ +} +#endif + /* * Used for sizing the vmemmap region on some architectures */ diff --git a/include/linux/module.h b/include/linux/module.h index c60ee39cb9b1..b3a643435357 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -296,7 +296,7 @@ extern typeof(name) __mod_device_table__##type##__##name \ * files require multiple MODULE_FIRMWARE() specifiers */ #define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) -#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, __stringify(ns)) +#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, ns) struct notifier_block; @@ -773,7 +773,8 @@ void *__module_writable_address(struct module *mod, void *loc); static inline void *module_writable_address(struct module *mod, void *loc) { - if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) || !mod) + if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) || !mod || + mod->state != MODULE_STATE_UNFORMED) return loc; return __module_writable_address(mod, loc); } diff --git a/include/linux/mount.h b/include/linux/mount.h index c34c18b4e8f3..dcc17ce8a959 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -50,7 +50,7 @@ struct path; #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ - MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB) + MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) #define MNT_INTERNAL 0x4000 @@ -64,7 +64,6 @@ struct path; #define MNT_SYNC_UMOUNT 0x2000000 #define MNT_MARKED 0x4000000 #define MNT_UMOUNT 0x8000000 -#define MNT_ONRB 0x10000000 struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ @@ -76,7 +75,7 @@ struct vfsmount { static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt) { /* Pairs with smp_store_release() in do_idmap_mount(). 
*/ - return smp_load_acquire(&mnt->mnt_idmap); + return READ_ONCE(mnt->mnt_idmap); } extern int mnt_want_write(struct vfsmount *mnt); @@ -114,7 +113,7 @@ extern struct vfsmount *kern_mount(struct file_system_type *); extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); -extern long do_mount(const char *, const char __user *, +int do_mount(const char *, const char __user *, const char *, unsigned long, void *); extern struct vfsmount *collect_mounts(const struct path *); extern void drop_collected_mounts(struct vfsmount *); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 66e7d26b70a4..11be70a7929f 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -253,4 +253,11 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) +static inline netdev_features_t netdev_base_features(netdev_features_t features) +{ + features &= ~NETIF_F_ONE_FOR_ALL; + features |= NETIF_F_ALL_FOR_ALL; + return features; +} + #endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 5eaceef41e6c..071d05d81d38 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -18,9 +18,11 @@ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/uio.h> +#include <linux/rolling_buffer.h> enum netfs_sreq_ref_trace; typedef struct mempool_s mempool_t; +struct folio_queue; /** * folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED] @@ -71,6 +73,7 @@ struct netfs_inode { #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ #define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */ +#define NETFS_ICTX_SINGLE_NO_UPLOAD 4 /* Monolithic payload, cache but no upload */ }; /* @@ -178,39 +181,35 @@ struct netfs_io_subrequest { unsigned long long start; /* Where to start the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ - size_t consumed; /* Amount of read data consumed */ - size_t prev_donated; /* Amount of data donated from previous subreq */ - size_t next_donated; /* Amount of data donated from next subreq */ refcount_t ref; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ unsigned int nr_segs; /* Number of segs in io_iter */ + u8 retry_count; /* The number of retries (0 on initial pass) */ enum netfs_io_source source; /* Where to read from/write to */ unsigned char stream_nr; /* I/O stream this belongs to */ - unsigned char curr_folioq_slot; /* Folio currently being read */ - unsigned char curr_folio_order; /* Order of folio */ - struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */ unsigned long flags; #define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ -#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ +#define NETFS_SREQ_MADE_PROGRESS 4 /* Set if we transferred at least some data */ #define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */ #define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. 
ceph object) */ #define NETFS_SREQ_HIT_EOF 7 /* Set if short due to EOF */ #define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */ #define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */ -#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */ -#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */ +#define NETFS_SREQ_FAILED 10 /* Set if the subreq failed unretryably */ }; enum netfs_io_origin { NETFS_READAHEAD, /* This read was triggered by readahead */ NETFS_READPAGE, /* This read is a synchronous read */ NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */ + NETFS_READ_SINGLE, /* This read should be treated as a single object */ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ NETFS_DIO_READ, /* This is a direct I/O read */ NETFS_WRITEBACK, /* This write was triggered by writepages */ + NETFS_WRITEBACK_SINGLE, /* This monolithic write was triggered by writepages */ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */ NETFS_DIO_WRITE, /* This is a direct I/O write */ @@ -231,16 +230,16 @@ struct netfs_io_request { struct address_space *mapping; /* The mapping being accessed */ struct kiocb *iocb; /* AIO completion vector */ struct netfs_cache_resources cache_resources; + struct netfs_io_request *copy_to_cache; /* Request to write just-read data to the cache */ struct readahead_control *ractl; /* Readahead descriptor */ struct list_head proc_link; /* Link in netfs_iorequests */ - struct list_head subrequests; /* Contributory I/O operations */ struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */ #define NR_IO_STREAMS 2 //wreq->nr_io_streams struct netfs_group *group; /* Writeback group being written back */ - struct folio_queue *buffer; /* Head of I/O buffer */ - struct folio_queue *buffer_tail; /* Tail of I/O buffer */ - struct iov_iter iter; /* Unencrypted-side iterator */ - struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */ + struct rolling_buffer buffer; /* Unencrypted buffer */ +#define NETFS_ROLLBUF_PUT_MARK ROLLBUF_MARK_1 +#define NETFS_ROLLBUF_PAGECACHE_MARK ROLLBUF_MARK_2 + wait_queue_head_t waitq; /* Processor waiter */ void *netfs_priv; /* Private data for the netfs */ void *netfs_priv2; /* Private data for the netfs */ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */ @@ -251,29 +250,28 @@ struct netfs_io_request { atomic_t subreq_counter; /* Next subreq->debug_index */ unsigned int nr_group_rel; /* Number of refs to release on ->group */ spinlock_t lock; /* Lock for queuing subreqs */ - atomic_t nr_outstanding; /* Number of ops in progress */ unsigned long long submitted; /* Amount submitted for I/O so far */ unsigned long long len; /* Length of the request */ size_t transferred; /* Amount to be indicated as transferred */ long error; /* 0 or error that occurred */ enum netfs_io_origin origin; /* Origin of the request */ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */ - u8 buffer_head_slot; /* First slot in ->buffer */ - u8 buffer_tail_slot; /* Next slot in ->buffer_tail */ unsigned long long i_size; /* Size of the file */ unsigned long long start; /* Start position */ atomic64_t issued_to; /* Write issuer folio cursor */ unsigned long long collected_to; /* Point we've collected to */ unsigned long long cleaned_to; /* Position we've cleaned folios to */ + unsigned long long abandon_to; /* Position to abandon folios to */ pgoff_t 
no_unlock_folio; /* Don't unlock this folio after read */ - size_t prev_donated; /* Fallback for subreq->prev_donated */ + unsigned char front_folio_order; /* Order (size) of front folio */ refcount_t ref; unsigned long flags; -#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */ +#define NETFS_RREQ_OFFLOAD_COLLECTION 0 /* Offload collection to workqueue */ #define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */ #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ #define NETFS_RREQ_FAILED 4 /* The request failed */ #define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */ +#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 6 /* Copy current folio to cache from read */ #define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ #define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */ #define NETFS_RREQ_BLOCKED 10 /* We blocked */ @@ -410,6 +408,13 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter * struct netfs_group *netfs_group); ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from); +/* Single, monolithic object read/write API. */ +void netfs_single_mark_inode_dirty(struct inode *inode); +ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter); +int netfs_writeback_single(struct address_space *mapping, + struct writeback_control *wbc, + struct iov_iter *iter); + /* Address operations API */ struct readahead_control; void netfs_readahead(struct readahead_control *); @@ -429,10 +434,8 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp); vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group); /* (Sub)request management API. */ -void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq, - bool was_async); -void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, - int error, bool was_async); +void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq); +void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq); void netfs_get_subrequest(struct netfs_io_subrequest *subreq, enum netfs_sreq_ref_trace what); void netfs_put_subrequest(struct netfs_io_subrequest *subreq, @@ -454,6 +457,18 @@ void netfs_end_io_write(struct inode *inode); int netfs_start_io_direct(struct inode *inode); void netfs_end_io_direct(struct inode *inode); +/* Miscellaneous APIs. */ +struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp, + unsigned int trace /*enum netfs_folioq_trace*/); +void netfs_folioq_free(struct folio_queue *folioq, + unsigned int trace /*enum netfs_trace_folioq*/); + +/* Buffer wrangling helpers API. 
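 *
 * Editor's sketch (not part of the patch; mapping, new_size and the error
 * label are placeholders) of how the declarations just below might be used:
 *
 *	struct folio_queue *buf = NULL;
 *	size_t cur_size = 0;
 *	int ret;
 *
 *	ret = netfs_alloc_folioq_buffer(mapping, &buf, &cur_size,
 *					new_size, GFP_KERNEL);
 *	if (ret < 0)
 *		goto error;
 *	...
 *	netfs_free_folioq_buffer(buf);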
*/ +int netfs_alloc_folioq_buffer(struct address_space *mapping, + struct folio_queue **_buffer, + size_t *_cur_size, ssize_t size, gfp_t gfp); +void netfs_free_folioq_buffer(struct folio_queue *fq); + /** * netfs_inode - Get the netfs inode context from the inode * @inode: The inode to query diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 2220bfec278e..691506bdf2c5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -306,7 +306,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio, { const struct page *page = &folio->page; - VM_BUG_ON_PGFLAGS(PageTail(page), page); + VM_BUG_ON_PGFLAGS(page->compound_head & 1, page); VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); return &page[n].flags; } @@ -315,7 +315,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n) { struct page *page = &folio->page; - VM_BUG_ON_PGFLAGS(PageTail(page), page); + VM_BUG_ON_PGFLAGS(page->compound_head & 1, page); VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); return &page[n].flags; } @@ -862,18 +862,10 @@ static inline void ClearPageCompound(struct page *page) ClearPageHead(page); } FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE) -FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE) -/* - * PG_partially_mapped is protected by deferred_split split_queue_lock, - * so its safe to use non-atomic set/clear. - */ -__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE) -__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE) +FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE) #else FOLIO_FLAG_FALSE(large_rmappable) -FOLIO_TEST_FLAG_FALSE(partially_mapped) -__FOLIO_SET_FLAG_NOOP(partially_mapped) -__FOLIO_CLEAR_FLAG_NOOP(partially_mapped) +FOLIO_FLAG_FALSE(partially_mapped) #endif #define PG_head_mask ((1UL << PG_head)) diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 35842d1e3879..5b520fe86b60 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -221,10 +221,7 @@ do { \ } while (0) #define PERCPU_PTR(__p) \ -({ \ - unsigned long __pcpu_ptr = (__force unsigned long)(__p); \ - (typeof(*(__p)) __force __kernel *)(__pcpu_ptr); \ -}) + (typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p)) #ifdef CONFIG_SMP diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 0e43ab653ab6..3469c4b20105 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -231,7 +231,7 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) } void pgalloc_tag_split(struct folio *folio, int old_order, int new_order); -void pgalloc_tag_copy(struct folio *new, struct folio *old); +void pgalloc_tag_swap(struct folio *new, struct folio *old); void __init alloc_tag_sec_init(void); @@ -245,7 +245,7 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} static inline void alloc_tag_sec_init(void) {} static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} -static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {} +static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {} #endif /* CONFIG_MEM_ALLOC_PROFILING */ diff --git a/include/linux/pid.h b/include/linux/pid.h index a3aad9b4074c..98837a1ff0f3 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -59,6 +59,7 @@ struct pid spinlock_t lock; struct dentry *stashed; u64 
ino; + struct rb_node pidfs_node; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; struct hlist_head inodes; @@ -68,6 +69,7 @@ struct pid struct upid numbers[]; }; +extern seqcount_spinlock_t pidmap_lock_seq; extern struct pid init_struct_pid; struct file; @@ -106,9 +108,6 @@ extern void exchange_tids(struct task_struct *task, struct task_struct *old); extern void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type); -extern int pid_max; -extern int pid_max_min, pid_max_max; - /* * look up a PID in the hash table. Must be called with the tasklist_lock * or rcu_read_lock() held. diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index f9f9931e02d6..7c67a5811199 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -30,6 +30,7 @@ struct pid_namespace { struct task_struct *child_reaper; struct kmem_cache *pid_cachep; unsigned int level; + int pid_max; struct pid_namespace *parent; #ifdef CONFIG_BSD_PROCESS_ACCT struct fs_pin *bacct; @@ -38,9 +39,14 @@ struct pid_namespace { struct ucounts *ucounts; int reboot; /* group exit code if this pidns was rebooted */ struct ns_common ns; -#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) + struct work_struct work; +#ifdef CONFIG_SYSCTL + struct ctl_table_set set; + struct ctl_table_header *sysctls; +#if defined(CONFIG_MEMFD_CREATE) int memfd_noexec_scope; #endif +#endif } __randomize_layout; extern struct pid_namespace init_pid_ns; @@ -117,6 +123,8 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); void pidhash_init(void); void pid_idr_init(void); +int register_pidns_sysctls(struct pid_namespace *pidns); +void unregister_pidns_sysctls(struct pid_namespace *pidns); static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) { diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h index 75bdf9807802..7c830d0dec9a 100644 --- a/include/linux/pidfs.h +++ b/include/linux/pidfs.h @@ -4,5 +4,8 @@ struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags); void __init pidfs_init(void); +void pidfs_add_pid(struct pid *pid); +void pidfs_remove_pid(struct pid *pid); +extern const struct dentry_operations pidfs_dentry_operations; #endif /* _LINUX_PID_FS_H */ diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h index 576d952f97ed..967a6ef31cf9 100644 --- a/include/linux/platform_data/amd_qdma.h +++ b/include/linux/platform_data/amd_qdma.h @@ -26,11 +26,13 @@ struct dma_slave_map; * @max_mm_channels: Maximum number of MM DMA channels in each direction * @device_map: DMA slave map * @irq_index: The index of first IRQ + * @dma_dev: The device pointer for dma operations */ struct qdma_platdata { u32 max_mm_channels; u32 irq_index; struct dma_slave_map *device_map; + struct device *dma_dev; }; #endif /* _PLATDATA_AMD_QDMA_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 7132623e4658..074754c23d33 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -235,17 +235,7 @@ extern void platform_device_put(struct platform_device *pdev); struct platform_driver { int (*probe)(struct platform_device *); - - /* - * .remove_new() is a relic from a prototype conversion of .remove(). - * New drivers are supposed to implement .remove(). Once all drivers are - * converted to not use .remove_new any more, it will be dropped. 
- */ - union { - void (*remove)(struct platform_device *); - void (*remove_new)(struct platform_device *); - }; - + void (*remove)(struct platform_device *); void (*shutdown)(struct platform_device *); int (*suspend)(struct platform_device *, pm_message_t state); int (*resume)(struct platform_device *); diff --git a/include/linux/pm.h b/include/linux/pm.h index 97b0e23363c8..e7f0260f15ad 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -385,7 +385,7 @@ const struct dev_pm_ops name = { \ #ifdef CONFIG_PM #define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns) #define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name) -#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns) +#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns) #else #define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns) #define EXPORT_PM_FN_GPL(name) diff --git a/include/linux/poll.h b/include/linux/poll.h index d1ea4f3714a8..12bb18e8b978 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -25,14 +25,14 @@ struct poll_table_struct; -/* +/* * structures and helpers for f_op->poll implementations */ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); /* - * Do not touch the structure directly, use the access functions - * poll_does_not_wait() and poll_requested_events() instead. + * Do not touch the structure directly, use the access function + * poll_requested_events() instead. */ typedef struct poll_table_struct { poll_queue_proc _qproc; @@ -41,18 +41,16 @@ typedef struct poll_table_struct { static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) { - if (p && p->_qproc && wait_address) + if (p && p->_qproc) { p->_qproc(filp, wait_address, p); -} - -/* - * Return true if it is guaranteed that poll will not wait. This is the case - * if the poll() of another file descriptor in the set got an event, so there - * is no need for waiting. - */ -static inline bool poll_does_not_wait(const poll_table *p) -{ - return p == NULL || p->_qproc == NULL; + /* + * This memory barrier is paired with the one in wq_has_sleeper(). + * See the comment above prepare_to_wait(); we need to + * ensure that subsequent tests in this thread can't be + * reordered with __add_wait_queue() in _qproc() paths. 
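+ *
+ * An illustrative ->poll() pattern that this barrier orders (dev and
+ * DATA_READY are hypothetical, not part of this patch):
+ *
+ *	poll_wait(file, &dev->waitq, p);
+ *	if (test_bit(DATA_READY, &dev->flags))
+ *		mask |= EPOLLIN;
+ *
+ * Without the smp_mb(), the test_bit() could be reordered before the
+ * __add_wait_queue() done by _qproc() and a wakeup issued in between
+ * could be missed.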
+ */ + smp_mb(); + } } /* diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h index c9a31c567e85..2e18fef1a2e1 100644 --- a/include/linux/pruss_driver.h +++ b/include/linux/pruss_driver.h @@ -144,32 +144,32 @@ static inline int pruss_release_mem_region(struct pruss *pruss, static inline int pruss_cfg_get_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 *mux) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; } static inline int pruss_cfg_set_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 mux) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; } static inline int pruss_cfg_gpimode(struct pruss *pruss, enum pruss_pru_id pru_id, enum pruss_gpi_mode mode) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; } static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; } static inline int pruss_cfg_xfr_enable(struct pruss *pruss, enum pru_type pru_type, - bool enable); + bool enable) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; } #endif /* CONFIG_TI_PRUSS */ diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h index 730f77381d55..2503f7625d65 100644 --- a/include/linux/pseudo_fs.h +++ b/include/linux/pseudo_fs.h @@ -5,6 +5,7 @@ struct pseudo_fs_context { const struct super_operations *ops; + const struct export_operations *eops; const struct xattr_handler * const *xattr; const struct dentry_operations *dops; unsigned long magic; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 78827f312407..6853e29d9674 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -8,7 +8,7 @@ #include <linux/mutex.h> #include <linux/of.h> -MODULE_IMPORT_NS(PWM); +MODULE_IMPORT_NS("PWM"); struct pwm_chip; diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 14dfa6008467..1b11926ddd47 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -30,6 +30,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) * way, we must not access it directly */ #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) +/* + * Return the ->prev pointer of a list_head in an rcu safe way. Don't + * access it directly. + * + * Any list traversed with list_bidir_prev_rcu() must never use + * list_del_rcu(). Doing so will poison the ->prev pointer that + * list_bidir_prev_rcu() relies on, which will result in segfaults. + * To prevent these segfaults, use list_bidir_del_rcu() instead + * of list_del_rcu(). + */ +#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev))) /** * list_tail_rcu - returns the prev pointer of the head of the list @@ -159,6 +170,39 @@ static inline void list_del_rcu(struct list_head *entry) } /** + * list_bidir_del_rcu - deletes entry from list without re-initialization + * @entry: the element to delete from the list. + * + * In contrast to list_del_rcu() doesn't poison the prev pointer thus + * allowing backwards traversal via list_bidir_prev_rcu(). + * + * Note: list_empty() on entry does not return true after this because + * the entry is in a special undefined state that permits RCU-based + * lockfree reverse traversal. In particular this means that we can not + * poison the forward and backwards pointers that may still be used for + * walking the list. 
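+ *
+ * A minimal RCU reverse-walk sketch that these rules permit (struct
+ * foo and handle() are hypothetical):
+ *
+ *	rcu_read_lock();
+ *	for (p = rcu_dereference(list_bidir_prev_rcu(head));
+ *	     p != head;
+ *	     p = rcu_dereference(list_bidir_prev_rcu(p)))
+ *		handle(list_entry(p, struct foo, list));
+ *	rcu_read_unlock();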
+ * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another list-mutation + * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on + * this same list. However, it is perfectly legal to run concurrently + * with the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + * + * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on + * the same list. + * + * Note that the caller is not permitted to immediately free + * the newly deleted entry. Instead, either synchronize_rcu() + * or call_rcu() must be used to defer freeing until an RCU + * grace period has elapsed. + */ +static inline void list_bidir_del_rcu(struct list_head *entry) +{ + __list_del_entry(entry); +} + +/** * hlist_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. * diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 8c3c372ad735..bcba3935c6f9 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -168,29 +168,6 @@ int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id); void regulator_put(struct regulator *regulator); void devm_regulator_put(struct regulator *regulator); -#if IS_ENABLED(CONFIG_OF) -struct regulator *__must_check of_regulator_get_optional(struct device *dev, - struct device_node *node, - const char *id); -struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, - struct device_node *node, - const char *id); -#else -static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev, - struct device_node *node, - const char *id) -{ - return ERR_PTR(-ENODEV); -} - -static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, - struct device_node *node, - const char *id) -{ - return ERR_PTR(-ENODEV); -} -#endif - int regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, const char *alias_id); @@ -223,8 +200,6 @@ int regulator_disable_deferred(struct regulator *regulator, int ms); int __must_check regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers); -int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np, - struct regulator_bulk_data **consumers); int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers); void devm_regulator_bulk_put(struct regulator_bulk_data *consumers); @@ -373,20 +348,6 @@ devm_regulator_get_optional(struct device *dev, const char *id) return ERR_PTR(-ENODEV); } -static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev, - struct device_node *node, - const char *id) -{ - return ERR_PTR(-ENODEV); -} - -static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, - struct device_node *node, - const char *id) -{ - return ERR_PTR(-ENODEV); -} - static inline void regulator_put(struct regulator *regulator) { } @@ -483,12 +444,6 @@ static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers, return 0; } -static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np, - struct regulator_bulk_data **consumers) -{ - return 0; -} - static inline int devm_regulator_bulk_get_const( struct device *dev, int num_consumers, const struct regulator_bulk_data 
*in_consumers, @@ -700,6 +655,38 @@ regulator_is_equal(struct regulator *reg1, struct regulator *reg2) } #endif +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_REGULATOR) +struct regulator *__must_check of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id); +struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id); +int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np, + struct regulator_bulk_data **consumers); +#else +static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np, + struct regulator_bulk_data **consumers) +{ + return 0; +} + +#endif + static inline int regulator_set_voltage_triplet(struct regulator *regulator, int min_uV, int target_uV, int max_uV) diff --git a/include/linux/rolling_buffer.h b/include/linux/rolling_buffer.h new file mode 100644 index 000000000000..ac15b1ffdd83 --- /dev/null +++ b/include/linux/rolling_buffer.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Rolling buffer of folios + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _ROLLING_BUFFER_H +#define _ROLLING_BUFFER_H + +#include <linux/folio_queue.h> +#include <linux/uio.h> + +/* + * Rolling buffer. Whilst the buffer is live and in use, folios and folio + * queue segments can be added to one end by one thread and removed from the + * other end by another thread. The buffer isn't allowed to be empty; it must + * always have at least one folio_queue in it so that neither side has to + * modify both queue pointers. + * + * The iterator in the buffer is extended as buffers are inserted. It can be + * snapshotted to use a segment of the buffer. + */ +struct rolling_buffer { + struct folio_queue *head; /* Producer's insertion point */ + struct folio_queue *tail; /* Consumer's removal point */ + struct iov_iter iter; /* Iterator tracking what's left in the buffer */ + u8 next_head_slot; /* Next slot in ->head */ + u8 first_tail_slot; /* First slot in ->tail */ +}; + +/* + * Snapshot of a rolling buffer. + */ +struct rolling_buffer_snapshot { + struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */ + unsigned char curr_slot; /* Folio currently being read */ + unsigned char curr_order; /* Order of folio */ +}; + +/* Marks to store per-folio in the internal folio_queue structs. 
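+ *
+ * netfs wraps these as NETFS_ROLLBUF_PUT_MARK and
+ * NETFS_ROLLBUF_PAGECACHE_MARK (see netfs.h above). An illustrative
+ * producer/consumer pairing, with error handling elided and the
+ * surrounding variables assumed:
+ *
+ *	rolling_buffer_init(&roll, rreq_id, ITER_SOURCE);
+ *	rolling_buffer_append(&roll, folio, ROLLBUF_MARK_1);
+ *	...
+ *	rolling_buffer_advance(&roll, len);
+ *	spent = rolling_buffer_delete_spent(&roll);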
*/ +#define ROLLBUF_MARK_1 BIT(0) +#define ROLLBUF_MARK_2 BIT(1) + +int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id, + unsigned int direction); +int rolling_buffer_make_space(struct rolling_buffer *roll); +ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll, + struct readahead_control *ractl, + struct folio_batch *put_batch); +ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio, + unsigned int flags); +struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll); +void rolling_buffer_clear(struct rolling_buffer *roll); + +static inline void rolling_buffer_advance(struct rolling_buffer *roll, size_t amount) +{ + iov_iter_advance(&roll->iter, amount); +} + +#endif /* _ROLLING_BUFFER_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index c5e2239b550e..d836e7440ee8 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -313,7 +313,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg) } /** - * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address + * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address * @sg: SG entry * * Description: diff --git a/include/linux/sched.h b/include/linux/sched.h index d380bffee2ef..64934e0830af 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -656,6 +656,12 @@ struct sched_dl_entity { * @dl_defer_armed tells if the deferrable server is waiting * for the replenishment timer to activate it. * + * @dl_server_active tells if the dlserver is active (started). + * dlserver is started on the first cfs enqueue on an idle runqueue + * and is stopped when a dequeue results in 0 cfs tasks on the + * runqueue. In other words, dlserver is active only when the CPU's + * runqueue has at least one cfs task. + * * @dl_defer_running tells if the deferrable server is actually * running, skipping the defer phase. */ @@ -664,6 +670,7 @@ struct sched_dl_entity { unsigned int dl_non_contending : 1; unsigned int dl_overrun : 1; unsigned int dl_server : 1; + unsigned int dl_server_active : 1; unsigned int dl_defer : 1; unsigned int dl_defer_armed : 1; unsigned int dl_defer_running : 1; @@ -1630,8 +1637,9 @@ static inline unsigned int __task_state_index(unsigned int tsk_state, * We're lying here, but rather than expose a completely new task state * to userspace, we can make this appear as if the task has gone through * a regular rt_mutex_lock() call. + * Report frozen tasks as uninterruptible. 
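+ * A TASK_FROZEN task therefore yields the TASK_UNINTERRUPTIBLE index
+ * here and shows up in userspace (e.g. ps(1)) as an ordinary "D"
+ * state task instead of exposing a new state.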
*/ - if (tsk_state & TASK_RTLOCK_WAIT) + if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN)) state = TASK_UNINTERRUPTIBLE; return fls(state); diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 341980599c71..e45531455d3b 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -55,10 +55,10 @@ struct seccomp_data; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER static inline int secure_computing(void) { return 0; } -static inline int __secure_computing(const struct seccomp_data *sd) { return 0; } #else static inline void secure_computing_strict(int this_syscall) { return; } #endif +static inline int __secure_computing(const struct seccomp_data *sd) { return 0; } static inline long prctl_get_seccomp(void) { diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 5298765d6ca4..eb20dcaa51b5 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -272,7 +272,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) ({ \ unsigned __seq; \ \ - while ((__seq = seqprop_sequence(s)) & 1) \ + while (unlikely((__seq = seqprop_sequence(s)) & 1)) \ cpu_relax(); \ \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index d9b03e0746e7..2cbe0c22a32f 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -317,17 +317,22 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } -static inline void sk_psock_queue_msg(struct sk_psock *psock, +static inline bool sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { + bool ret; + spin_lock_bh(&psock->ingress_lock); - if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { list_add_tail(&msg->list, &psock->ingress_msg); - else { + ret = true; + } else { sk_msg_free(psock->sk, msg); kfree(msg); + ret = false; } spin_unlock_bh(&psock->ingress_lock); + return ret; } static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock) diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index e9ec32fb97d4..2cc21ffcdaf9 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -147,7 +147,7 @@ static inline int stack_depot_early_init(void) { return 0; } * If the provided stack trace comes from the interrupt context, only the part * up to the interrupt entry is saved. * - * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if + * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if * alloc_pages() cannot be used from the current context. Currently * this is the case for contexts where neither %GFP_ATOMIC nor * %GFP_NOWAIT can be used (NMI, raw_spin_lock). 
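 *
 * For instance, a caller in NMI context might use (illustrative; the
 * gfp argument is ignored when STACK_DEPOT_FLAG_CAN_ALLOC is not set):
 *
 *	handle = stack_depot_save_flags(entries, nr_entries, 0, 0);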
@@ -156,7 +156,7 @@ static inline int stack_depot_early_init(void) { return 0; } */ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, unsigned int nr_entries, - gfp_t gfp_flags, + gfp_t alloc_flags, depot_flags_t depot_flags); /** @@ -175,7 +175,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, * Return: Handle of the stack trace stored in depot, 0 on failure */ depot_stack_handle_t stack_depot_save(unsigned long *entries, - unsigned int nr_entries, gfp_t gfp_flags); + unsigned int nr_entries, gfp_t alloc_flags); /** * __stack_depot_get_stack_record - Get a pointer to a stack_record struct diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 141e6b176a1b..78a77a4ae0ea 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -160,6 +160,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool #ifdef CONFIG_HAVE_STATIC_CALL_INLINE +extern int static_call_initialized; + extern int __init static_call_init(void); extern void static_call_force_reinit(void); @@ -225,6 +227,8 @@ extern long __static_call_return0(void); #elif defined(CONFIG_HAVE_STATIC_CALL) +#define static_call_initialized 0 + static inline int static_call_init(void) { return 0; } #define DEFINE_STATIC_CALL(name, _func) \ @@ -281,6 +285,8 @@ extern long __static_call_return0(void); #else /* Generic implementation */ +#define static_call_initialized 0 + static inline int static_call_init(void) { return 0; } static inline long __static_call_return0(void) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 2a5df5b62cfc..58ad4ead33fc 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -273,7 +273,8 @@ struct trace_event_fields { const char *name; const int size; const int align; - const int is_signed; + const unsigned int is_signed:1; + unsigned int needs_test:1; const int filter_type; const int len; }; @@ -324,6 +325,7 @@ enum { TRACE_EVENT_FL_EPROBE_BIT, TRACE_EVENT_FL_FPROBE_BIT, TRACE_EVENT_FL_CUSTOM_BIT, + TRACE_EVENT_FL_TEST_STR_BIT, }; /* @@ -340,6 +342,7 @@ enum { * CUSTOM - Event is a custom event (to be attached to an existing tracepoint) * This is set when the custom event has not been attached * to a tracepoint yet, then it is cleared when it is. 
+ * TEST_STR - The event has a "%s" that points to a string outside the event */ enum { TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), @@ -352,6 +355,7 @@ enum { TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT), TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT), TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT), + TRACE_EVENT_FL_TEST_STR = (1 << TRACE_EVENT_FL_TEST_STR_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) @@ -360,7 +364,7 @@ struct trace_event_call { struct list_head list; struct trace_event_class *class; union { - char *name; + const char *name; /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ struct tracepoint *tp; }; diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index cb40f1a1d081..75342022d144 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -247,6 +247,13 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma, vma_is_shmem(vma); } +static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma) +{ + struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx; + + return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0; +} + extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); extern void dup_userfaultfd_complete(struct list_head *); void dup_userfaultfd_fail(struct list_head *); @@ -402,6 +409,11 @@ static inline bool userfaultfd_wp_async(struct vm_area_struct *vma) return false; } +static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma) +{ + return false; +} + #endif /* CONFIG_USERFAULTFD */ static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index a54046bf37e5..939ceabcaf06 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -15,10 +15,10 @@ #else #define MODULE_VERMAGIC_SMP "" #endif -#ifdef CONFIG_PREEMPT_BUILD -#define MODULE_VERMAGIC_PREEMPT "preempt " -#elif defined(CONFIG_PREEMPT_RT) +#ifdef CONFIG_PREEMPT_RT #define MODULE_VERMAGIC_PREEMPT "preempt_rt " +#elif defined(CONFIG_PREEMPT_BUILD) +#define MODULE_VERMAGIC_PREEMPT "preempt " #else #define MODULE_VERMAGIC_PREEMPT "" #endif diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 57cc4b07fd17..dd88682e27e3 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -109,9 +109,11 @@ dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq); dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq); int virtqueue_resize(struct virtqueue *vq, u32 num, - void (*recycle)(struct virtqueue *vq, void *buf)); + void (*recycle)(struct virtqueue *vq, void *buf), + void (*recycle_done)(struct virtqueue *vq)); int virtqueue_reset(struct virtqueue *vq, - void (*recycle)(struct virtqueue *vq, void *buf)); + void (*recycle)(struct virtqueue *vq, void *buf), + void (*recycle_done)(struct virtqueue *vq)); struct virtio_admin_cmd { __le16 opcode; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d2761bf8ff32..9f3a04345b86 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -515,7 +515,7 @@ static inline const char *node_stat_name(enum node_stat_item item) static inline const char *lru_list_name(enum lru_list lru) { - return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_" + return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_" } #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) diff 
--git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index a401a2f31a77..45ff6f7a872b 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -156,8 +156,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, &ww_class->acquire_key, 0); - lockdep_init_map(&ctx->first_lock_dep_map, ww_class->mutex_name, - &ww_class->mutex_key, 0); + lockdep_init_map_wait(&ctx->first_lock_dep_map, ww_class->mutex_name, + &ww_class->mutex_key, 0, LD_WAIT_SLEEP); mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_); #endif diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index f66bc85c6411..435250c72d56 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -123,6 +123,7 @@ struct bt_voice { #define BT_VOICE_TRANSPARENT 0x0003 #define BT_VOICE_CVSD_16BIT 0x0060 +#define BT_VOICE_TRANSPARENT_16BIT 0x0063 #define BT_SNDMTU 12 #define BT_RCVMTU 13 @@ -590,15 +591,6 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk, return skb; } -static inline int bt_copy_from_sockptr(void *dst, size_t dst_size, - sockptr_t src, size_t src_size) -{ - if (dst_size > src_size) - return -EINVAL; - - return copy_from_sockptr(dst, src, dst_size); -} - int bt_to_errno(u16 code); __u8 bt_status(int err); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ea798f07c5a2..ca22ead85dbe 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -804,7 +804,6 @@ struct hci_conn_params { extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; -extern struct mutex hci_cb_list_lock; #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) @@ -2017,24 +2016,47 @@ struct hci_cb { char *name; + bool (*match) (struct hci_conn *conn); void (*connect_cfm) (struct hci_conn *conn, __u8 status); void (*disconn_cfm) (struct hci_conn *conn, __u8 status); void (*security_cfm) (struct hci_conn *conn, __u8 status, - __u8 encrypt); + __u8 encrypt); void (*key_change_cfm) (struct hci_conn *conn, __u8 status); void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); }; +static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list) +{ + struct hci_cb *cb, *cpy; + + rcu_read_lock(); + list_for_each_entry_rcu(cb, &hci_cb_list, list) { + if (cb->match && cb->match(conn)) { + cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC); + if (!cpy) + break; + + *cpy = *cb; + INIT_LIST_HEAD(&cpy->list); + list_add_rcu(&cpy->list, list); + } + } + rcu_read_unlock(); +} + static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) { - struct hci_cb *cb; + struct list_head list; + struct hci_cb *cb, *tmp; + + INIT_LIST_HEAD(&list); + hci_cb_lookup(conn, &list); - mutex_lock(&hci_cb_list_lock); - list_for_each_entry(cb, &hci_cb_list, list) { + list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->connect_cfm) cb->connect_cfm(conn, status); + kfree(cb); } - mutex_unlock(&hci_cb_list_lock); if (conn->connect_cfm_cb) conn->connect_cfm_cb(conn, status); @@ -2042,43 +2064,55 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) { - struct hci_cb *cb; + struct list_head 
list; + struct hci_cb *cb, *tmp; + + INIT_LIST_HEAD(&list); + hci_cb_lookup(conn, &list); - mutex_lock(&hci_cb_list_lock); - list_for_each_entry(cb, &hci_cb_list, list) { + list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->disconn_cfm) cb->disconn_cfm(conn, reason); + kfree(cb); } - mutex_unlock(&hci_cb_list_lock); if (conn->disconn_cfm_cb) conn->disconn_cfm_cb(conn, reason); } -static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) +static inline void hci_security_cfm(struct hci_conn *conn, __u8 status, + __u8 encrypt) { - struct hci_cb *cb; - __u8 encrypt; - - if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) - return; + struct list_head list; + struct hci_cb *cb, *tmp; - encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; + INIT_LIST_HEAD(&list); + hci_cb_lookup(conn, &list); - mutex_lock(&hci_cb_list_lock); - list_for_each_entry(cb, &hci_cb_list, list) { + list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); + kfree(cb); } - mutex_unlock(&hci_cb_list_lock); if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); } +static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) +{ + __u8 encrypt; + + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) + return; + + encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; + + hci_security_cfm(conn, status, encrypt); +} + static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { - struct hci_cb *cb; __u8 encrypt; if (conn->state == BT_CONFIG) { @@ -2105,40 +2139,38 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) conn->sec_level = conn->pending_sec_level; } - mutex_lock(&hci_cb_list_lock); - list_for_each_entry(cb, &hci_cb_list, list) { - if (cb->security_cfm) - cb->security_cfm(conn, status, encrypt); - } - mutex_unlock(&hci_cb_list_lock); - - if (conn->security_cfm_cb) - conn->security_cfm_cb(conn, status); + hci_security_cfm(conn, status, encrypt); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { - struct hci_cb *cb; + struct list_head list; + struct hci_cb *cb, *tmp; + + INIT_LIST_HEAD(&list); + hci_cb_lookup(conn, &list); - mutex_lock(&hci_cb_list_lock); - list_for_each_entry(cb, &hci_cb_list, list) { + list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); + kfree(cb); } - mutex_unlock(&hci_cb_list_lock); } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) { - struct hci_cb *cb; + struct list_head list; + struct hci_cb *cb, *tmp; + + INIT_LIST_HEAD(&list); + hci_cb_lookup(conn, &list); - mutex_lock(&hci_cb_list_lock); - list_for_each_entry(cb, &hci_cb_list, list) { + list_for_each_entry_safe(cb, tmp, &list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); + kfree(cb); } - mutex_unlock(&hci_cb_list_lock); } static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index c858270141bc..c39a426ebf52 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -174,12 +174,4 @@ static inline void sk_mark_napi_id_once(struct sock *sk, #endif } -static inline void sk_mark_napi_id_once_xdp(struct sock *sk, - const struct xdp_buff *xdp) -{ -#ifdef CONFIG_NET_RX_BUSY_POLL - __sk_mark_napi_id_once(sk, xdp->rxq->napi_id); -#endif -} - #endif /* _LINUX_NET_BUSY_POLL_H */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 
3c82fad904d4..c7f42844c79a 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk) static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) { - return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog); + return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog); } bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index beb533a0e880..62c0a7e65d6b 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -45,6 +45,8 @@ struct inet_timewait_sock { #define tw_node __tw_common.skc_nulls_node #define tw_bind_node __tw_common.skc_bind_node #define tw_refcnt __tw_common.skc_refcnt +#define tw_tx_queue_mapping __tw_common.skc_tx_queue_mapping +#define tw_rx_queue_mapping __tw_common.skc_rx_queue_mapping #define tw_hash __tw_common.skc_hash #define tw_prot __tw_common.skc_prot #define tw_net __tw_common.skc_net diff --git a/include/net/lapb.h b/include/net/lapb.h index 124ee122f2c8..6c07420644e4 100644 --- a/include/net/lapb.h +++ b/include/net/lapb.h @@ -4,7 +4,7 @@ #include <linux/lapb.h> #include <linux/refcount.h> -#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */ +#define LAPB_HEADER_LEN MAX_HEADER /* LAPB over Ethernet + a bit more */ #define LAPB_ACK_PENDING_CONDITION 0x01 #define LAPB_REJECT_CONDITION 0x02 diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a97c9f85ae9a..ab8dce1f2c27 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1987,6 +1987,8 @@ enum ieee80211_neg_ttlm_res { * @neg_ttlm: negotiated TID to link mapping info. * see &struct ieee80211_neg_ttlm. * @addr: address of this interface + * @addr_valid: indicates if the address is actively used. Set to false for + * passive monitor interfaces, true in all other cases. * @p2p: indicates whether this AP or STA interface is a p2p * interface, i.e. a GO or p2p-sta respectively * @netdev_features: tx netdev features supported by the hardware for this @@ -2026,6 +2028,7 @@ struct ieee80211_vif { u16 valid_links, active_links, dormant_links, suspended_links; struct ieee80211_neg_ttlm neg_ttlm; u8 addr[ETH_ALEN] __aligned(2); + bool addr_valid; bool p2p; u8 cab_queue; @@ -6795,14 +6798,12 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success, /** * ieee80211_channel_switch_disconnect - disconnect due to channel switch error * @vif: &struct ieee80211_vif pointer from the add_interface callback. - * @block_tx: if %true, do not send deauth frame. * * Instruct mac80211 to disconnect due to a channel switch error. The channel * switch can request to block the tx, so we need to make sure we do not send * a deauth frame in this case. */ -void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, - bool block_tx); +void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif); /** * ieee80211_request_smps - request SM PS transition diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 873c0f9fdac6..5a2a0df8ad91 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -80,6 +80,7 @@ struct net { * or to unregister pernet ops * (pernet_ops_rwsem write locked). 
*/ + struct llist_node defer_free_list; struct llist_node cleanup_list; /* namespaces on death row */ #ifdef CONFIG_KEYS @@ -325,6 +326,11 @@ static inline int check_net(const struct net *net) #define net_drop_ns NULL #endif +/* Returns true if the netns initialization is completed successfully */ +static inline bool net_initialized(const struct net *net) +{ + return READ_ONCE(net->list.next); +} static inline void __netns_tracker_alloc(struct net *net, netns_tracker *tracker, diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 80a537ac26cd..0027beca5cd5 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -733,15 +733,18 @@ struct nft_set_ext_tmpl { /** * struct nft_set_ext - set extensions * - * @genmask: generation mask + * @genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT) * @offset: offsets of individual extension types * @data: beginning of extension data + * + * This structure must be aligned to word size, otherwise atomic bitops + * on genmask field can cause alignment failure on some archs. */ struct nft_set_ext { u8 genmask; u8 offset[NFT_SET_EXT_NUM]; char data[]; -}; +} __aligned(BITS_PER_LONG / 8); static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl) { @@ -1103,7 +1106,6 @@ struct nft_rule_blob { * @name: name of the chain * @udlen: user data length * @udata: user data in the chain - * @rcu_head: rcu head for deferred release * @blob_next: rule blob pointer to the next in the chain */ struct nft_chain { @@ -1121,7 +1123,6 @@ struct nft_chain { char *name; u16 udlen; u8 *udata; - struct rcu_head rcu_head; /* Only used during control plane commit phase: */ struct nft_rule_blob *blob_next; @@ -1265,7 +1266,6 @@ static inline void nft_use_inc_restore(u32 *use) * @sets: sets in the table * @objects: stateful objects in the table * @flowtables: flow tables in the table - * @net: netnamespace this table belongs to * @hgenerator: handle generator state * @handle: table handle * @use: number of chain references to this table @@ -1285,7 +1285,6 @@ struct nft_table { struct list_head sets; struct list_head objects; struct list_head flowtables; - possible_net_t net; u64 hgenerator; u64 handle; u32 use; diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index ff27cb2e1662..03b6165756fc 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -161,6 +161,7 @@ enum { }; struct nft_inner_tun_ctx { + unsigned long cookie; u16 type; u16 inner_tunoff; u16 inner_lloff; diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h index 793e6fd78bc5..60a5347922be 100644 --- a/include/net/page_pool/helpers.h +++ b/include/net/page_pool/helpers.h @@ -294,7 +294,7 @@ static inline long page_pool_unref_page(struct page *page, long nr) static inline void page_pool_ref_netmem(netmem_ref netmem) { - atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count); + atomic_long_inc(netmem_get_pp_ref_count_ref(netmem)); } static inline void page_pool_ref_page(struct page *page) diff --git a/include/net/sock.h b/include/net/sock.h index 7464e9f9f47c..691ca7695d1d 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1527,7 +1527,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size) } static inline bool -sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) +__sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc) { int delta; @@ -1535,7 +1535,13 @@ 
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) return true; delta = size - sk->sk_forward_alloc; return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || - skb_pfmemalloc(skb); + pfmemalloc; +} + +static inline bool +sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) +{ + return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb)); } static inline int sk_unused_reserved_mem(const struct sock *sk) @@ -2291,7 +2297,7 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq) } /** - * sock_poll_wait - place memory barrier behind the poll_wait call. + * sock_poll_wait - wrapper for the poll_wait call. * @filp: file * @sock: socket to wait on * @p: poll_table @@ -2301,15 +2307,12 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq) static inline void sock_poll_wait(struct file *filp, struct socket *sock, poll_table *p) { - if (!poll_does_not_wait(p)) { - poll_wait(filp, &sock->wq.wait, p); - /* We need to be sure we are in sync with the - * socket flags modification. - * - * This memory barrier is paired in the wq_has_sleeper. - */ - smp_mb(); - } + /* Provides the memory barrier we need to be sure we are in sync + * with the socket flags modification. + * + * This memory barrier is paired with the one in wq_has_sleeper(). + */ + poll_wait(filp, &sock->wq.wait, p); } static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) diff --git a/include/net/xdp.h b/include/net/xdp.h index e6770dd40c91..b5b10f2b88e5 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -62,7 +62,6 @@ struct xdp_rxq_info { u32 queue_index; u32 reg_state; struct xdp_mem_info mem; - unsigned int napi_id; u32 frag_size; } ____cacheline_aligned; /* perf critical, avoid false-sharing */ diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 40085afd9160..7a7316d9c0da 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -59,15 +59,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool, xp_fill_cb(pool, desc); } -static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool) -{ -#ifdef CONFIG_NET_RX_BUSY_POLL - return pool->heads[0].xdp.rxq->napi_id; -#else - return 0; -#endif -} - static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) { @@ -306,11 +297,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool, { } -static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool) -{ - return 0; -} - static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) { diff --git a/include/soc/arc/aux.h b/include/soc/arc/arc_aux.h index 9c2eff6140b6..9c2eff6140b6 100644 --- a/include/soc/arc/aux.h +++ b/include/soc/arc/arc_aux.h diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index d1a93c73f006..a78dacd149f1 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h @@ -8,7 +8,7 @@ #ifndef __SOC_ARC_MCIP_H #define __SOC_ARC_MCIP_H -#include <soc/arc/aux.h> +#include <soc/arc/arc_aux.h> #define ARC_REG_MCIP_BCR 0x0d0 #define ARC_REG_MCIP_IDU_BCR 0x0D5 diff --git a/include/soc/arc/timers.h b/include/soc/arc/timers.h index ae99d3e855f1..51a74166296c 100644 --- a/include/soc/arc/timers.h +++ b/include/soc/arc/timers.h @@ -6,7 +6,7 @@ #ifndef __SOC_ARC_TIMERS_H #define __SOC_ARC_TIMERS_H -#include <soc/arc/aux.h> +#include <soc/arc/arc_aux.h> /* Timer related Aux registers */ #define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 
462c653e1017..2db9ae0575b6 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -778,7 +778,6 @@ struct ocelot_port { phy_interface_t phy_mode; - unsigned int ptp_skbs_in_flight; struct sk_buff_head tx_skbs; unsigned int trap_proto; @@ -786,7 +785,6 @@ struct ocelot_port { u16 mrp_ring_id; u8 ptp_cmd; - u8 ts_id; u8 index; diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index 94e8185c4795..3dc7a1551ac3 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -271,12 +271,6 @@ struct cs35l56_base { struct gpio_desc *reset_gpio; }; -/* Temporary to avoid a build break with the HDA driver */ -static inline int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base) -{ - return 0; -} - static inline bool cs35l56_is_otp_register(unsigned int reg) { return (reg >> 16) == 3; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index a0aed1a428a1..2e92487f3f34 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -118,6 +118,8 @@ enum yfs_cm_operation { */ #define afs_call_traces \ EM(afs_call_trace_alloc, "ALLOC") \ + EM(afs_call_trace_async_abort, "ASYAB") \ + EM(afs_call_trace_async_kill, "ASYKL") \ EM(afs_call_trace_free, "FREE ") \ EM(afs_call_trace_get, "GET ") \ EM(afs_call_trace_put, "PUT ") \ @@ -323,6 +325,44 @@ enum yfs_cm_operation { EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \ E_(yfs_CB_CallBack, "YFSCB.CallBack") +#define afs_cb_promise_traces \ + EM(afs_cb_promise_clear_cb_break, "CLEAR cb-break") \ + EM(afs_cb_promise_clear_rmdir, "CLEAR rmdir") \ + EM(afs_cb_promise_clear_rotate_server, "CLEAR rot-srv") \ + EM(afs_cb_promise_clear_server_change, "CLEAR srv-chg") \ + EM(afs_cb_promise_clear_vol_init_cb, "CLEAR vol-init-cb") \ + EM(afs_cb_promise_set_apply_cb, "SET apply-cb") \ + EM(afs_cb_promise_set_new_inode, "SET new-inode") \ + E_(afs_cb_promise_set_new_symlink, "SET new-symlink") + +#define afs_vnode_invalid_traces \ + EM(afs_vnode_invalid_trace_cb_ro_snapshot, "cb-ro-snapshot") \ + EM(afs_vnode_invalid_trace_cb_scrub, "cb-scrub") \ + EM(afs_vnode_invalid_trace_cb_v_break, "cb-v-break") \ + EM(afs_vnode_invalid_trace_expired, "expired") \ + EM(afs_vnode_invalid_trace_no_cb_promise, "no-cb-promise") \ + EM(afs_vnode_invalid_trace_vol_expired, "vol-expired") \ + EM(afs_vnode_invalid_trace_zap_data, "zap-data") \ + E_(afs_vnode_valid_trace, "valid") + +#define afs_dir_invalid_traces \ + EM(afs_dir_invalid_edit_add_bad_size, "edit-add-bad-size") \ + EM(afs_dir_invalid_edit_add_no_slots, "edit-add-no-slots") \ + EM(afs_dir_invalid_edit_add_too_many_blocks, "edit-add-too-many-blocks") \ + EM(afs_dir_invalid_edit_get_block, "edit-get-block") \ + EM(afs_dir_invalid_edit_mkdir, "edit-mkdir") \ + EM(afs_dir_invalid_edit_rem_bad_size, "edit-rem-bad-size") \ + EM(afs_dir_invalid_edit_rem_wrong_name, "edit-rem-wrong_name") \ + EM(afs_dir_invalid_edit_upd_bad_size, "edit-upd-bad-size") \ + EM(afs_dir_invalid_edit_upd_no_dd, "edit-upd-no-dotdot") \ + EM(afs_dir_invalid_dv_mismatch, "dv-mismatch") \ + EM(afs_dir_invalid_inval_folio, "inv-folio") \ + EM(afs_dir_invalid_iter_stale, "iter-stale") \ + EM(afs_dir_invalid_reclaimed_folio, "reclaimed-folio") \ + EM(afs_dir_invalid_release_folio, "rel-folio") \ + EM(afs_dir_invalid_remote, "remote") \ + E_(afs_dir_invalid_subdir_removed, "subdir-removed") + #define afs_edit_dir_ops \ EM(afs_edit_dir_create, "create") \ EM(afs_edit_dir_create_error, "c_fail") \ @@ -332,6 +372,7 @@ enum yfs_cm_operation { 
EM(afs_edit_dir_delete_error, "d_err ") \ EM(afs_edit_dir_delete_inval, "d_invl") \ EM(afs_edit_dir_delete_noent, "d_nent") \ + EM(afs_edit_dir_mkdir, "mk_ent") \ EM(afs_edit_dir_update_dd, "u_ddot") \ EM(afs_edit_dir_update_error, "u_fail") \ EM(afs_edit_dir_update_inval, "u_invl") \ @@ -385,6 +426,7 @@ enum yfs_cm_operation { EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \ EM(afs_file_error_dir_small, "DIR_SMALL") \ EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \ + EM(afs_file_error_symlink_big, "SYM_BIG") \ EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \ E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED") @@ -487,7 +529,9 @@ enum yfs_cm_operation { enum afs_alist_trace { afs_alist_traces } __mode(byte); enum afs_call_trace { afs_call_traces } __mode(byte); enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte); +enum afs_cb_promise_trace { afs_cb_promise_traces } __mode(byte); enum afs_cell_trace { afs_cell_traces } __mode(byte); +enum afs_dir_invalid_trace { afs_dir_invalid_traces} __mode(byte); enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte); enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte); enum afs_eproto_cause { afs_eproto_causes } __mode(byte); @@ -498,6 +542,7 @@ enum afs_flock_operation { afs_flock_operations } __mode(byte); enum afs_io_error { afs_io_errors } __mode(byte); enum afs_rotate_trace { afs_rotate_traces } __mode(byte); enum afs_server_trace { afs_server_traces } __mode(byte); +enum afs_vnode_invalid_trace { afs_vnode_invalid_traces} __mode(byte); enum afs_volume_trace { afs_volume_traces } __mode(byte); #endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */ @@ -513,8 +558,10 @@ enum afs_volume_trace { afs_volume_traces } __mode(byte); afs_alist_traces; afs_call_traces; afs_cb_break_reasons; +afs_cb_promise_traces; afs_cell_traces; afs_cm_operations; +afs_dir_invalid_traces; afs_edit_dir_ops; afs_edit_dir_reasons; afs_eproto_causes; @@ -526,6 +573,7 @@ afs_fs_operations; afs_io_errors; afs_rotate_traces; afs_server_traces; +afs_vnode_invalid_traces; afs_vl_operations; yfs_cm_operations; @@ -670,7 +718,7 @@ TRACE_EVENT(afs_make_fs_call, } ), - TP_printk("c=%08x %06llx:%06llx:%06x %s", + TP_printk("c=%08x V=%llx i=%llx:%x %s", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -704,7 +752,7 @@ TRACE_EVENT(afs_make_fs_calli, } ), - TP_printk("c=%08x %06llx:%06llx:%06x %s i=%u", + TP_printk("c=%08x V=%llx i=%llx:%x %s i=%u", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -741,7 +789,7 @@ TRACE_EVENT(afs_make_fs_call1, __entry->name[__len] = 0; ), - TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\"", + TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\"", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -782,7 +830,7 @@ TRACE_EVENT(afs_make_fs_call2, __entry->name2[__len2] = 0; ), - TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\" \"%s\"", + TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\" \"%s\"", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -887,9 +935,9 @@ TRACE_EVENT(afs_sent_data, ); TRACE_EVENT(afs_dir_check_failed, - TP_PROTO(struct afs_vnode *vnode, loff_t off, loff_t i_size), + TP_PROTO(struct afs_vnode *vnode, loff_t off), - TP_ARGS(vnode, off, i_size), + TP_ARGS(vnode, off), TP_STRUCT__entry( __field(struct afs_vnode *, vnode) @@ -900,7 +948,7 @@ TRACE_EVENT(afs_dir_check_failed, TP_fast_assign( __entry->vnode = vnode; __entry->off = off; - __entry->i_size = i_size; + __entry->i_size = i_size_read(&vnode->netfs.inode); ), TP_printk("vn=%p %llx/%llx", @@ -1002,7 +1050,7 @@ TRACE_EVENT(afs_edit_dir, 
__entry->name[__len] = 0; ), - TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x \"%s\"", + TP_printk("di=%x:%x %s %s %u[%u] fi=%x:%x \"%s\"", __entry->vnode, __entry->unique, __print_symbolic(__entry->why, afs_edit_dir_reasons), __print_symbolic(__entry->op, afs_edit_dir_ops), @@ -1011,6 +1059,122 @@ TRACE_EVENT(afs_edit_dir, __entry->name) ); +TRACE_EVENT(afs_dir_invalid, + TP_PROTO(const struct afs_vnode *dvnode, enum afs_dir_invalid_trace trace), + + TP_ARGS(dvnode, trace), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(enum afs_dir_invalid_trace, trace) + ), + + TP_fast_assign( + __entry->vnode = dvnode->fid.vnode; + __entry->unique = dvnode->fid.unique; + __entry->trace = trace; + ), + + TP_printk("di=%x:%x %s", + __entry->vnode, __entry->unique, + __print_symbolic(__entry->trace, afs_dir_invalid_traces)) + ); + +TRACE_EVENT(afs_cb_promise, + TP_PROTO(const struct afs_vnode *vnode, enum afs_cb_promise_trace trace), + + TP_ARGS(vnode, trace), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(enum afs_cb_promise_trace, trace) + ), + + TP_fast_assign( + __entry->vnode = vnode->fid.vnode; + __entry->unique = vnode->fid.unique; + __entry->trace = trace; + ), + + TP_printk("di=%x:%x %s", + __entry->vnode, __entry->unique, + __print_symbolic(__entry->trace, afs_cb_promise_traces)) + ); + +TRACE_EVENT(afs_vnode_invalid, + TP_PROTO(const struct afs_vnode *vnode, enum afs_vnode_invalid_trace trace), + + TP_ARGS(vnode, trace), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(enum afs_vnode_invalid_trace, trace) + ), + + TP_fast_assign( + __entry->vnode = vnode->fid.vnode; + __entry->unique = vnode->fid.unique; + __entry->trace = trace; + ), + + TP_printk("di=%x:%x %s", + __entry->vnode, __entry->unique, + __print_symbolic(__entry->trace, afs_vnode_invalid_traces)) + ); + +TRACE_EVENT(afs_set_dv, + TP_PROTO(const struct afs_vnode *dvnode, u64 new_dv), + + TP_ARGS(dvnode, new_dv), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(u64, old_dv) + __field(u64, new_dv) + ), + + TP_fast_assign( + __entry->vnode = dvnode->fid.vnode; + __entry->unique = dvnode->fid.unique; + __entry->old_dv = dvnode->status.data_version; + __entry->new_dv = new_dv; + ), + + TP_printk("di=%x:%x dv=%llx -> dv=%llx", + __entry->vnode, __entry->unique, + __entry->old_dv, __entry->new_dv) + ); + +TRACE_EVENT(afs_dv_mismatch, + TP_PROTO(const struct afs_vnode *dvnode, u64 before_dv, int delta, u64 new_dv), + + TP_ARGS(dvnode, before_dv, delta, new_dv), + + TP_STRUCT__entry( + __field(unsigned int, vnode) + __field(unsigned int, unique) + __field(int, delta) + __field(u64, before_dv) + __field(u64, new_dv) + ), + + TP_fast_assign( + __entry->vnode = dvnode->fid.vnode; + __entry->unique = dvnode->fid.unique; + __entry->delta = delta; + __entry->before_dv = before_dv; + __entry->new_dv = new_dv; + ), + + TP_printk("di=%x:%x xdv=%llx+%d dv=%llx", + __entry->vnode, __entry->unique, + __entry->before_dv, __entry->delta, __entry->new_dv) + ); + TRACE_EVENT(afs_protocol_error, TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause), @@ -1611,6 +1775,36 @@ TRACE_EVENT(afs_make_call, __entry->fid.unique) ); +TRACE_EVENT(afs_read_recv, + TP_PROTO(const struct afs_operation *op, const struct afs_call *call), + + TP_ARGS(op, call), + + TP_STRUCT__entry( + __field(unsigned int, rreq) + __field(unsigned int, sreq) + __field(unsigned int, op) + __field(unsigned int, 
op_flags) + __field(unsigned int, call) + __field(enum afs_call_state, call_state) + ), + + TP_fast_assign( + __entry->op = op->debug_id; + __entry->sreq = op->fetch.subreq->debug_index; + __entry->rreq = op->fetch.subreq->rreq->debug_id; + __entry->op_flags = op->flags; + __entry->call = call->debug_id; + __entry->call_state = call->state; + ), + + TP_printk("R=%08x[%x] OP=%08x c=%08x cs=%x of=%x", + __entry->rreq, __entry->sreq, + __entry->op, + __entry->call, __entry->call_state, + __entry->op_flags) + ); + #endif /* _TRACE_AFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index 7d931db02b93..a743b2a35ea7 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -223,10 +223,10 @@ TRACE_EVENT(cachefiles_ref, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, cookie ) - __field(enum cachefiles_obj_ref_trace, why ) - __field(int, usage ) + __field(unsigned int, obj) + __field(unsigned int, cookie) + __field(enum cachefiles_obj_ref_trace, why) + __field(int, usage) ), TP_fast_assign( @@ -249,10 +249,10 @@ TRACE_EVENT(cachefiles_lookup, TP_ARGS(obj, dir, de), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(short, error ) - __field(unsigned long, dino ) - __field(unsigned long, ino ) + __field(unsigned int, obj) + __field(short, error) + __field(unsigned long, dino) + __field(unsigned long, ino) ), TP_fast_assign( @@ -273,8 +273,8 @@ TRACE_EVENT(cachefiles_mkdir, TP_ARGS(dir, subdir), TP_STRUCT__entry( - __field(unsigned int, dir ) - __field(unsigned int, subdir ) + __field(unsigned int, dir) + __field(unsigned int, subdir) ), TP_fast_assign( @@ -293,8 +293,8 @@ TRACE_EVENT(cachefiles_tmpfile, TP_ARGS(obj, backer), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) + __field(unsigned int, obj) + __field(unsigned int, backer) ), TP_fast_assign( @@ -313,8 +313,8 @@ TRACE_EVENT(cachefiles_link, TP_ARGS(obj, backer), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) + __field(unsigned int, obj) + __field(unsigned int, backer) ), TP_fast_assign( @@ -336,9 +336,9 @@ TRACE_EVENT(cachefiles_unlink, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, ino ) - __field(enum fscache_why_object_killed, why ) + __field(unsigned int, obj) + __field(unsigned int, ino) + __field(enum fscache_why_object_killed, why) ), TP_fast_assign( @@ -361,9 +361,9 @@ TRACE_EVENT(cachefiles_rename, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, ino ) - __field(enum fscache_why_object_killed, why ) + __field(unsigned int, obj) + __field(unsigned int, ino) + __field(enum fscache_why_object_killed, why) ), TP_fast_assign( @@ -380,17 +380,20 @@ TRACE_EVENT(cachefiles_rename, TRACE_EVENT(cachefiles_coherency, TP_PROTO(struct cachefiles_object *obj, ino_t ino, + u64 disk_aux, enum cachefiles_content content, enum cachefiles_coherency_trace why), - TP_ARGS(obj, ino, content, why), + TP_ARGS(obj, ino, disk_aux, content, why), /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(enum cachefiles_coherency_trace, why ) - __field(enum cachefiles_content, content ) - __field(u64, ino ) + __field(unsigned int, obj) + __field(enum cachefiles_coherency_trace, why) + __field(enum cachefiles_content, content) + __field(u64, ino) + __field(u64, aux) + 
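/*
 * Illustrative sketch (not part of the patch): one way a directory-edit
 * path could emit the afs_set_dv and afs_dv_mismatch events defined
 * above.  The tracepoint signatures follow the TRACE_EVENT() definitions;
 * afs_advance_dv() is a hypothetical helper, not a function in fs/afs.
 */
static void afs_advance_dv(struct afs_vnode *dvnode, u64 before_dv,
			   int delta, u64 new_dv)
{
	if (new_dv != before_dv + delta)
		trace_afs_dv_mismatch(dvnode, before_dv, delta, new_dv);

	/* trace_afs_set_dv() samples old_dv from dvnode->status.data_version,
	 * so it must be called before the version is updated.
	 */
	trace_afs_set_dv(dvnode, new_dv);
	dvnode->status.data_version = new_dv;
}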
__field(u64, disk_aux) ), TP_fast_assign( @@ -398,13 +401,17 @@ TRACE_EVENT(cachefiles_coherency, __entry->why = why; __entry->content = content; __entry->ino = ino; + __entry->aux = be64_to_cpup((__be64 *)obj->cookie->inline_aux); + __entry->disk_aux = disk_aux; ), - TP_printk("o=%08x %s B=%llx c=%u", + TP_printk("o=%08x %s B=%llx c=%u aux=%llx dsk=%llx", __entry->obj, __print_symbolic(__entry->why, cachefiles_coherency_traces), __entry->ino, - __entry->content) + __entry->content, + __entry->aux, + __entry->disk_aux) ); TRACE_EVENT(cachefiles_vol_coherency, @@ -416,9 +423,9 @@ TRACE_EVENT(cachefiles_vol_coherency, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, vol ) - __field(enum cachefiles_coherency_trace, why ) - __field(u64, ino ) + __field(unsigned int, vol) + __field(enum cachefiles_coherency_trace, why) + __field(u64, ino) ), TP_fast_assign( @@ -445,14 +452,14 @@ TRACE_EVENT(cachefiles_prep_read, TP_ARGS(obj, start, len, flags, source, why, cache_inode, netfs_inode), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned short, flags ) - __field(enum netfs_io_source, source ) - __field(enum cachefiles_prepare_read_trace, why ) - __field(size_t, len ) - __field(loff_t, start ) - __field(unsigned int, netfs_inode ) - __field(unsigned int, cache_inode ) + __field(unsigned int, obj) + __field(unsigned short, flags) + __field(enum netfs_io_source, source) + __field(enum cachefiles_prepare_read_trace, why) + __field(size_t, len) + __field(loff_t, start) + __field(unsigned int, netfs_inode) + __field(unsigned int, cache_inode) ), TP_fast_assign( @@ -484,10 +491,10 @@ TRACE_EVENT(cachefiles_read, TP_ARGS(obj, backer, start, len), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(size_t, len ) - __field(loff_t, start ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(size_t, len) + __field(loff_t, start) ), TP_fast_assign( @@ -513,10 +520,10 @@ TRACE_EVENT(cachefiles_write, TP_ARGS(obj, backer, start, len), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(size_t, len ) - __field(loff_t, start ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(size_t, len) + __field(loff_t, start) ), TP_fast_assign( @@ -540,11 +547,11 @@ TRACE_EVENT(cachefiles_trunc, TP_ARGS(obj, backer, from, to, why), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(enum cachefiles_trunc_trace, why ) - __field(loff_t, from ) - __field(loff_t, to ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(enum cachefiles_trunc_trace, why) + __field(loff_t, from) + __field(loff_t, to) ), TP_fast_assign( @@ -571,8 +578,8 @@ TRACE_EVENT(cachefiles_mark_active, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(ino_t, inode ) + __field(unsigned int, obj) + __field(ino_t, inode) ), TP_fast_assign( @@ -592,8 +599,8 @@ TRACE_EVENT(cachefiles_mark_failed, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(ino_t, inode ) + __field(unsigned int, obj) + __field(ino_t, inode) ), TP_fast_assign( @@ -613,8 +620,8 @@ TRACE_EVENT(cachefiles_mark_inactive, /* Note that obj may be NULL */ TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(ino_t, inode ) + __field(unsigned int, obj) + __field(ino_t, inode) ), TP_fast_assign( @@ -633,10 +640,10 @@ TRACE_EVENT(cachefiles_vfs_error, TP_ARGS(obj, backer, error, where), TP_STRUCT__entry( - 
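/*
 * Illustrative sketch (not part of the patch): a call site updated for
 * the extra disk_aux argument now taken by trace_cachefiles_coherency().
 * Note that the new TP_fast_assign() dereferences obj->cookie->inline_aux
 * unconditionally, so despite the "obj may be NULL" note the caller must
 * pass a valid object.  cachefiles_note_coherent() is hypothetical;
 * cachefiles_coherency_check_ok is an existing trace reason.
 */
static void cachefiles_note_coherent(struct cachefiles_object *object,
				     ino_t ino, u64 disk_aux,
				     enum cachefiles_content content)
{
	trace_cachefiles_coherency(object, ino, disk_aux, content,
				   cachefiles_coherency_check_ok);
}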
__field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(enum cachefiles_error_trace, where ) - __field(short, error ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(enum cachefiles_error_trace, where) + __field(short, error) ), TP_fast_assign( @@ -660,10 +667,10 @@ TRACE_EVENT(cachefiles_io_error, TP_ARGS(obj, backer, error, where), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(enum cachefiles_error_trace, where ) - __field(short, error ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(enum cachefiles_error_trace, where) + __field(short, error) ), TP_fast_assign( @@ -687,11 +694,11 @@ TRACE_EVENT(cachefiles_ondemand_open, TP_ARGS(obj, msg, load), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) - __field(unsigned int, object_id ) - __field(unsigned int, fd ) - __field(unsigned int, flags ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) + __field(unsigned int, object_id) + __field(unsigned int, fd) + __field(unsigned int, flags) ), TP_fast_assign( @@ -717,9 +724,9 @@ TRACE_EVENT(cachefiles_ondemand_copen, TP_ARGS(obj, msg_id, len), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) - __field(long, len ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) + __field(long, len) ), TP_fast_assign( @@ -740,9 +747,9 @@ TRACE_EVENT(cachefiles_ondemand_close, TP_ARGS(obj, msg), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) - __field(unsigned int, object_id ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) + __field(unsigned int, object_id) ), TP_fast_assign( @@ -764,11 +771,11 @@ TRACE_EVENT(cachefiles_ondemand_read, TP_ARGS(obj, msg, load), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) - __field(unsigned int, object_id ) - __field(loff_t, start ) - __field(size_t, len ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) + __field(unsigned int, object_id) + __field(loff_t, start) + __field(size_t, len) ), TP_fast_assign( @@ -793,8 +800,8 @@ TRACE_EVENT(cachefiles_ondemand_cread, TP_ARGS(obj, msg_id), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, msg_id ) + __field(unsigned int, obj) + __field(unsigned int, msg_id) ), TP_fast_assign( @@ -814,10 +821,10 @@ TRACE_EVENT(cachefiles_ondemand_fd_write, TP_ARGS(obj, backer, start, len), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, backer ) - __field(loff_t, start ) - __field(size_t, len ) + __field(unsigned int, obj) + __field(unsigned int, backer) + __field(loff_t, start) + __field(size_t, len) ), TP_fast_assign( @@ -840,8 +847,8 @@ TRACE_EVENT(cachefiles_ondemand_fd_release, TP_ARGS(obj, object_id), TP_STRUCT__entry( - __field(unsigned int, obj ) - __field(unsigned int, object_id ) + __field(unsigned int, obj) + __field(unsigned int, object_id) ), TP_fast_assign( diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h index 23200aabccac..da4bd9fd1162 100644 --- a/include/trace/events/damon.h +++ b/include/trace/events/damon.h @@ -15,7 +15,7 @@ TRACE_EVENT_CONDITION(damos_before_apply, unsigned int target_idx, struct damon_region *r, unsigned int nr_regions, bool do_trace), - TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace), + TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace), TP_CONDITION(do_trace), diff --git a/include/trace/events/hugetlbfs.h 
b/include/trace/events/hugetlbfs.h index 8331c904a9ba..59605dfaeeb4 100644 --- a/include/trace/events/hugetlbfs.h +++ b/include/trace/events/hugetlbfs.h @@ -23,7 +23,7 @@ TRACE_EVENT(hugetlbfs_alloc_inode, TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; - __entry->dir = dir->i_ino; + __entry->dir = dir ? dir->i_ino : 0; __entry->mode = mode; ), diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index bb8a59c6caa2..d36c857dd249 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -13,6 +13,69 @@ * Thus most bits set go first. */ +/* These define the values that are enums (the bits) */ +#define TRACE_GFP_FLAGS_GENERAL \ + TRACE_GFP_EM(DMA) \ + TRACE_GFP_EM(HIGHMEM) \ + TRACE_GFP_EM(DMA32) \ + TRACE_GFP_EM(MOVABLE) \ + TRACE_GFP_EM(RECLAIMABLE) \ + TRACE_GFP_EM(HIGH) \ + TRACE_GFP_EM(IO) \ + TRACE_GFP_EM(FS) \ + TRACE_GFP_EM(ZERO) \ + TRACE_GFP_EM(DIRECT_RECLAIM) \ + TRACE_GFP_EM(KSWAPD_RECLAIM) \ + TRACE_GFP_EM(WRITE) \ + TRACE_GFP_EM(NOWARN) \ + TRACE_GFP_EM(RETRY_MAYFAIL) \ + TRACE_GFP_EM(NOFAIL) \ + TRACE_GFP_EM(NORETRY) \ + TRACE_GFP_EM(MEMALLOC) \ + TRACE_GFP_EM(COMP) \ + TRACE_GFP_EM(NOMEMALLOC) \ + TRACE_GFP_EM(HARDWALL) \ + TRACE_GFP_EM(THISNODE) \ + TRACE_GFP_EM(ACCOUNT) \ + TRACE_GFP_EM(ZEROTAGS) + +#ifdef CONFIG_KASAN_HW_TAGS +# define TRACE_GFP_FLAGS_KASAN \ + TRACE_GFP_EM(SKIP_ZERO) \ + TRACE_GFP_EM(SKIP_KASAN) +#else +# define TRACE_GFP_FLAGS_KASAN +#endif + +#ifdef CONFIG_LOCKDEP +# define TRACE_GFP_FLAGS_LOCKDEP \ + TRACE_GFP_EM(NOLOCKDEP) +#else +# define TRACE_GFP_FLAGS_LOCKDEP +#endif + +#ifdef CONFIG_SLAB_OBJ_EXT +# define TRACE_GFP_FLAGS_SLAB \ + TRACE_GFP_EM(NO_OBJ_EXT) +#else +# define TRACE_GFP_FLAGS_SLAB +#endif + +#define TRACE_GFP_FLAGS \ + TRACE_GFP_FLAGS_GENERAL \ + TRACE_GFP_FLAGS_KASAN \ + TRACE_GFP_FLAGS_LOCKDEP \ + TRACE_GFP_FLAGS_SLAB + +#undef TRACE_GFP_EM +#define TRACE_GFP_EM(a) TRACE_DEFINE_ENUM(___GFP_##a##_BIT); + +TRACE_GFP_FLAGS + +/* Just in case these are ever used */ +TRACE_DEFINE_ENUM(___GFP_UNUSED_BIT); +TRACE_DEFINE_ENUM(___GFP_LAST_BIT); + #define gfpflag_string(flag) {(__force unsigned long)flag, #flag} #define __def_gfpflag_names \ diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index bf511bca896e..6e699cadcb29 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -21,6 +21,7 @@ EM(netfs_read_trace_readahead, "READAHEAD") \ EM(netfs_read_trace_readpage, "READPAGE ") \ EM(netfs_read_trace_read_gaps, "READ-GAPS") \ + EM(netfs_read_trace_read_single, "READ-SNGL") \ EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \ E_(netfs_read_trace_write_begin, "WRITEBEGN") @@ -35,9 +36,11 @@ EM(NETFS_READAHEAD, "RA") \ EM(NETFS_READPAGE, "RP") \ EM(NETFS_READ_GAPS, "RG") \ + EM(NETFS_READ_SINGLE, "R1") \ EM(NETFS_READ_FOR_WRITE, "RW") \ EM(NETFS_DIO_READ, "DR") \ EM(NETFS_WRITEBACK, "WB") \ + EM(NETFS_WRITEBACK_SINGLE, "W1") \ EM(NETFS_WRITETHROUGH, "WT") \ EM(NETFS_UNBUFFERED_WRITE, "UW") \ EM(NETFS_DIO_WRITE, "DW") \ @@ -47,17 +50,23 @@ EM(netfs_rreq_trace_assess, "ASSESS ") \ EM(netfs_rreq_trace_copy, "COPY ") \ EM(netfs_rreq_trace_collect, "COLLECT") \ + EM(netfs_rreq_trace_complete, "COMPLET") \ + EM(netfs_rreq_trace_dirty, "DIRTY ") \ EM(netfs_rreq_trace_done, "DONE ") \ EM(netfs_rreq_trace_free, "FREE ") \ EM(netfs_rreq_trace_redirty, "REDIRTY") \ EM(netfs_rreq_trace_resubmit, "RESUBMT") \ + EM(netfs_rreq_trace_set_abandon, "S-ABNDN") \ EM(netfs_rreq_trace_set_pause, "PAUSE ") \ EM(netfs_rreq_trace_unlock, 
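/*
 * Illustrative sketch (not part of the patch): the X-macro pattern used
 * by TRACE_GFP_FLAGS above, and by the netfs EM()/E_() lists nearby: the
 * flag list is written once and the per-element macro is redefined for
 * each expansion.  Standalone userspace demonstration, not kernel code.
 */
#include <stdio.h>

#define DEMO_FLAGS \
	EM(DMA) \
	EM(HIGHMEM) \
	EM(MOVABLE)

/* First expansion: an enum of bit numbers, mirroring ___GFP_*_BIT. */
#define EM(a) DEMO_##a##_BIT,
enum { DEMO_FLAGS DEMO_LAST_BIT };
#undef EM

/* Second expansion: a printable name for each bit. */
#define EM(a) #a,
static const char *demo_names[] = { DEMO_FLAGS };
#undef EM

int main(void)
{
	for (int i = 0; i < DEMO_LAST_BIT; i++)
		printf("bit %d = %s\n", i, demo_names[i]);
	return 0;
}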
"UNLOCK ") \ EM(netfs_rreq_trace_unlock_pgpriv2, "UNLCK-2") \ EM(netfs_rreq_trace_unmark, "UNMARK ") \ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \ EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \ + EM(netfs_rreq_trace_wait_queue, "WAIT-Q ") \ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \ + EM(netfs_rreq_trace_wake_queue, "WAKE-Q ") \ + EM(netfs_rreq_trace_woke_queue, "WOKE-Q ") \ EM(netfs_rreq_trace_unpause, "UNPAUSE") \ E_(netfs_rreq_trace_write_done, "WR-DONE") @@ -74,6 +83,10 @@ #define netfs_sreq_traces \ EM(netfs_sreq_trace_add_donations, "+DON ") \ EM(netfs_sreq_trace_added, "ADD ") \ + EM(netfs_sreq_trace_cache_nowrite, "CA-NW") \ + EM(netfs_sreq_trace_cache_prepare, "CA-PR") \ + EM(netfs_sreq_trace_cache_write, "CA-WR") \ + EM(netfs_sreq_trace_cancel, "CANCL") \ EM(netfs_sreq_trace_clear, "CLEAR") \ EM(netfs_sreq_trace_discard, "DSCRD") \ EM(netfs_sreq_trace_donate_to_prev, "DON-P") \ @@ -84,6 +97,9 @@ EM(netfs_sreq_trace_hit_eof, "EOF ") \ EM(netfs_sreq_trace_io_progress, "IO ") \ EM(netfs_sreq_trace_limited, "LIMIT") \ + EM(netfs_sreq_trace_need_clear, "N-CLR") \ + EM(netfs_sreq_trace_partial_read, "PARTR") \ + EM(netfs_sreq_trace_need_retry, "NRTRY") \ EM(netfs_sreq_trace_prepare, "PREP ") \ EM(netfs_sreq_trace_prep_failed, "PRPFL") \ EM(netfs_sreq_trace_progress, "PRGRS") \ @@ -129,6 +145,7 @@ EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \ EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ EM(netfs_sreq_trace_new, "NEW ") \ + EM(netfs_sreq_trace_put_abandon, "PUT ABANDON") \ EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \ EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \ EM(netfs_sreq_trace_put_consumed, "PUT CONSUME") \ @@ -152,6 +169,7 @@ EM(netfs_streaming_filled_page, "mod-streamw-f") \ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \ EM(netfs_folio_trace_abandon, "abandon") \ + EM(netfs_folio_trace_alloc_buffer, "alloc-buf") \ EM(netfs_folio_trace_cancel_copy, "cancel-copy") \ EM(netfs_folio_trace_cancel_store, "cancel-store") \ EM(netfs_folio_trace_clear, "clear") \ @@ -168,6 +186,7 @@ EM(netfs_folio_trace_mkwrite, "mkwrite") \ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \ EM(netfs_folio_trace_not_under_wback, "!wback") \ + EM(netfs_folio_trace_not_locked, "!locked") \ EM(netfs_folio_trace_put, "put") \ EM(netfs_folio_trace_read, "read") \ EM(netfs_folio_trace_read_done, "read-done") \ @@ -191,6 +210,14 @@ EM(netfs_trace_donate_to_next, "to-next") \ E_(netfs_trace_donate_to_deferred_next, "defer-next") +#define netfs_folioq_traces \ + EM(netfs_trace_folioq_alloc_buffer, "alloc-buf") \ + EM(netfs_trace_folioq_clear, "clear") \ + EM(netfs_trace_folioq_delete, "delete") \ + EM(netfs_trace_folioq_make_space, "make-space") \ + EM(netfs_trace_folioq_rollbuf_init, "roll-init") \ + E_(netfs_trace_folioq_read_progress, "r-progress") + #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY @@ -209,6 +236,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); enum netfs_folio_trace { netfs_folio_traces } __mode(byte); enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte); enum netfs_donate_trace { netfs_donate_traces } __mode(byte); +enum netfs_folioq_trace { netfs_folioq_traces } __mode(byte); #endif @@ -232,6 +260,7 @@ netfs_sreq_ref_traces; netfs_folio_traces; netfs_collect_contig_traces; netfs_donate_traces; +netfs_folioq_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -250,13 +279,13 @@ TRACE_EVENT(netfs_read, TP_ARGS(rreq, start, len, what), 
TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(unsigned int, cookie ) - __field(loff_t, i_size ) - __field(loff_t, start ) - __field(size_t, len ) - __field(enum netfs_read_trace, what ) - __field(unsigned int, netfs_inode ) + __field(unsigned int, rreq) + __field(unsigned int, cookie) + __field(loff_t, i_size) + __field(loff_t, start) + __field(size_t, len) + __field(enum netfs_read_trace, what) + __field(unsigned int, netfs_inode) ), TP_fast_assign( @@ -284,10 +313,10 @@ TRACE_EVENT(netfs_rreq, TP_ARGS(rreq, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(unsigned int, flags ) - __field(enum netfs_io_origin, origin ) - __field(enum netfs_rreq_trace, what ) + __field(unsigned int, rreq) + __field(unsigned int, flags) + __field(enum netfs_io_origin, origin) + __field(enum netfs_rreq_trace, what) ), TP_fast_assign( @@ -311,15 +340,16 @@ TRACE_EVENT(netfs_sreq, TP_ARGS(sreq, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(unsigned short, index ) - __field(short, error ) - __field(unsigned short, flags ) - __field(enum netfs_io_source, source ) - __field(enum netfs_sreq_trace, what ) - __field(size_t, len ) - __field(size_t, transferred ) - __field(loff_t, start ) + __field(unsigned int, rreq) + __field(unsigned short, index) + __field(short, error) + __field(unsigned short, flags) + __field(enum netfs_io_source, source) + __field(enum netfs_sreq_trace, what) + __field(u8, slot) + __field(size_t, len) + __field(size_t, transferred) + __field(loff_t, start) ), TP_fast_assign( @@ -332,15 +362,16 @@ TRACE_EVENT(netfs_sreq, __entry->len = sreq->len; __entry->transferred = sreq->transferred; __entry->start = sreq->start; + __entry->slot = sreq->io_iter.folioq_slot; ), - TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d", + TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx s=%u e=%d", __entry->rreq, __entry->index, __print_symbolic(__entry->source, netfs_sreq_sources), __print_symbolic(__entry->what, netfs_sreq_traces), __entry->flags, __entry->start, __entry->transferred, __entry->len, - __entry->error) + __entry->slot, __entry->error) ); TRACE_EVENT(netfs_failure, @@ -351,15 +382,15 @@ TRACE_EVENT(netfs_failure, TP_ARGS(rreq, sreq, error, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(short, index ) - __field(short, error ) - __field(unsigned short, flags ) - __field(enum netfs_io_source, source ) - __field(enum netfs_failure, what ) - __field(size_t, len ) - __field(size_t, transferred ) - __field(loff_t, start ) + __field(unsigned int, rreq) + __field(short, index) + __field(short, error) + __field(unsigned short, flags) + __field(enum netfs_io_source, source) + __field(enum netfs_failure, what) + __field(size_t, len) + __field(size_t, transferred) + __field(loff_t, start) ), TP_fast_assign( @@ -390,9 +421,9 @@ TRACE_EVENT(netfs_rreq_ref, TP_ARGS(rreq_debug_id, ref, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(int, ref ) - __field(enum netfs_rreq_ref_trace, what ) + __field(unsigned int, rreq) + __field(int, ref) + __field(enum netfs_rreq_ref_trace, what) ), TP_fast_assign( @@ -414,10 +445,10 @@ TRACE_EVENT(netfs_sreq_ref, TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what), TP_STRUCT__entry( - __field(unsigned int, rreq ) - __field(unsigned int, subreq ) - __field(int, ref ) - __field(enum netfs_sreq_ref_trace, what ) + __field(unsigned int, rreq) + __field(unsigned int, subreq) + __field(int, ref) + __field(enum netfs_sreq_ref_trace, what) ), TP_fast_assign( @@ -465,10 +496,10 @@ 
TRACE_EVENT(netfs_write_iter, TP_ARGS(iocb, from), TP_STRUCT__entry( - __field(unsigned long long, start ) - __field(size_t, len ) - __field(unsigned int, flags ) - __field(unsigned int, ino ) + __field(unsigned long long, start) + __field(size_t, len) + __field(unsigned int, flags) + __field(unsigned int, ino) ), TP_fast_assign( @@ -489,12 +520,12 @@ TRACE_EVENT(netfs_write, TP_ARGS(wreq, what), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned int, cookie ) - __field(unsigned int, ino ) - __field(enum netfs_write_trace, what ) - __field(unsigned long long, start ) - __field(unsigned long long, len ) + __field(unsigned int, wreq) + __field(unsigned int, cookie) + __field(unsigned int, ino) + __field(enum netfs_write_trace, what) + __field(unsigned long long, start) + __field(unsigned long long, len) ), TP_fast_assign( @@ -522,10 +553,10 @@ TRACE_EVENT(netfs_collect, TP_ARGS(wreq), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned int, len ) - __field(unsigned long long, transferred ) - __field(unsigned long long, start ) + __field(unsigned int, wreq) + __field(unsigned int, len) + __field(unsigned long long, transferred) + __field(unsigned long long, start) ), TP_fast_assign( @@ -548,12 +579,12 @@ TRACE_EVENT(netfs_collect_sreq, TP_ARGS(wreq, subreq), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned int, subreq ) - __field(unsigned int, stream ) - __field(unsigned int, len ) - __field(unsigned int, transferred ) - __field(unsigned long long, start ) + __field(unsigned int, wreq) + __field(unsigned int, subreq) + __field(unsigned int, stream) + __field(unsigned int, len) + __field(unsigned int, transferred) + __field(unsigned long long, start) ), TP_fast_assign( @@ -579,11 +610,11 @@ TRACE_EVENT(netfs_collect_folio, TP_ARGS(wreq, folio, fend, collected_to), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned long, index ) - __field(unsigned long long, fend ) - __field(unsigned long long, cleaned_to ) - __field(unsigned long long, collected_to ) + __field(unsigned int, wreq) + __field(unsigned long, index) + __field(unsigned long long, fend) + __field(unsigned long long, cleaned_to) + __field(unsigned long long, collected_to) ), TP_fast_assign( @@ -608,10 +639,10 @@ TRACE_EVENT(netfs_collect_state, TP_ARGS(wreq, collected_to, notes), TP_STRUCT__entry( - __field(unsigned int, wreq ) - __field(unsigned int, notes ) - __field(unsigned long long, collected_to ) - __field(unsigned long long, cleaned_to ) + __field(unsigned int, wreq) + __field(unsigned int, notes) + __field(unsigned long long, collected_to) + __field(unsigned long long, cleaned_to) ), TP_fast_assign( @@ -680,69 +711,27 @@ TRACE_EVENT(netfs_collect_stream, __entry->collected_to, __entry->front) ); -TRACE_EVENT(netfs_progress, - TP_PROTO(const struct netfs_io_subrequest *subreq, - unsigned long long start, size_t avail, size_t part), +TRACE_EVENT(netfs_folioq, + TP_PROTO(const struct folio_queue *fq, + enum netfs_folioq_trace trace), - TP_ARGS(subreq, start, avail, part), + TP_ARGS(fq, trace), TP_STRUCT__entry( __field(unsigned int, rreq) - __field(unsigned int, subreq) - __field(unsigned int, consumed) - __field(unsigned int, transferred) - __field(unsigned long long, f_start) - __field(unsigned int, f_avail) - __field(unsigned int, f_part) - __field(unsigned char, slot) + __field(unsigned int, id) + __field(enum netfs_folioq_trace, trace) ), TP_fast_assign( - __entry->rreq = subreq->rreq->debug_id; - __entry->subreq = subreq->debug_index; - __entry->consumed 
= subreq->consumed; - __entry->transferred = subreq->transferred; - __entry->f_start = start; - __entry->f_avail = avail; - __entry->f_part = part; - __entry->slot = subreq->curr_folioq_slot; - ), - - TP_printk("R=%08x[%02x] s=%llx ct=%x/%x pa=%x/%x sl=%x", - __entry->rreq, __entry->subreq, __entry->f_start, - __entry->consumed, __entry->transferred, - __entry->f_part, __entry->f_avail, __entry->slot) - ); - -TRACE_EVENT(netfs_donate, - TP_PROTO(const struct netfs_io_request *rreq, - const struct netfs_io_subrequest *from, - const struct netfs_io_subrequest *to, - size_t amount, - enum netfs_donate_trace trace), - - TP_ARGS(rreq, from, to, amount, trace), - - TP_STRUCT__entry( - __field(unsigned int, rreq) - __field(unsigned int, from) - __field(unsigned int, to) - __field(unsigned int, amount) - __field(enum netfs_donate_trace, trace) - ), - - TP_fast_assign( - __entry->rreq = rreq->debug_id; - __entry->from = from->debug_index; - __entry->to = to ? to->debug_index : -1; - __entry->amount = amount; + __entry->rreq = fq ? fq->rreq_id : 0; + __entry->id = fq ? fq->debug_id : 0; __entry->trace = trace; ), - TP_printk("R=%08x[%02x] -> [%02x] %s am=%x", - __entry->rreq, __entry->from, __entry->to, - __print_symbolic(__entry->trace, netfs_donate_traces), - __entry->amount) + TP_printk("R=%08x fq=%x %s", + __entry->rreq, __entry->id, + __print_symbolic(__entry->trace, netfs_folioq_traces)) ); #undef EM diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h index 24ca0c00cae3..9d9e8ae32b41 100644 --- a/include/uapi/linux/fiemap.h +++ b/include/uapi/linux/fiemap.h @@ -14,37 +14,56 @@ #include <linux/types.h> +/** + * struct fiemap_extent - description of one fiemap extent + * @fe_logical: byte offset of the extent in the file + * @fe_physical: byte offset of extent on disk + * @fe_length: length in bytes for this extent + * @fe_flags: FIEMAP_EXTENT_* flags for this extent + */ struct fiemap_extent { - __u64 fe_logical; /* logical offset in bytes for the start of - * the extent from the beginning of the file */ - __u64 fe_physical; /* physical offset in bytes for the start - * of the extent from the beginning of the disk */ - __u64 fe_length; /* length in bytes for this extent */ + __u64 fe_logical; + __u64 fe_physical; + __u64 fe_length; + /* private: */ __u64 fe_reserved64[2]; - __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */ + /* public: */ + __u32 fe_flags; + /* private: */ __u32 fe_reserved[3]; }; +/** + * struct fiemap - file extent mappings + * @fm_start: byte offset (inclusive) at which to start mapping (in) + * @fm_length: logical length of mapping which userspace wants (in) + * @fm_flags: FIEMAP_FLAG_* flags for request (in/out) + * @fm_mapped_extents: number of extents that were mapped (out) + * @fm_extent_count: size of fm_extents array (in) + * @fm_extents: array of mapped extents (out) + */ struct fiemap { - __u64 fm_start; /* logical offset (inclusive) at - * which to start mapping (in) */ - __u64 fm_length; /* logical length of mapping which - * userspace wants (in) */ - __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ - __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ - __u32 fm_extent_count; /* size of fm_extents array (in) */ + __u64 fm_start; + __u64 fm_length; + __u32 fm_flags; + __u32 fm_mapped_extents; + __u32 fm_extent_count; + /* private: */ __u32 fm_reserved; - struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */ + /* public: */ + struct fiemap_extent fm_extents[]; }; #define 
FIEMAP_MAX_OFFSET (~0ULL) +/* flags used in fm_flags: */ #define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */ #define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */ #define FIEMAP_FLAG_CACHE 0x00000004 /* request caching of the extents */ #define FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR) +/* flags used in fe_flags: */ #define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ #define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ #define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 753971770733..56a4f93a08f4 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -332,9 +332,13 @@ typedef int __bitwise __kernel_rwf_t; /* Atomic Write */ #define RWF_ATOMIC ((__force __kernel_rwf_t)0x00000040) +/* buffered IO that drops the cache after reading or writing data */ +#define RWF_DONTCACHE ((__force __kernel_rwf_t)0x00000080) + /* mask of flags supported by the kernel */ #define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ - RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC) + RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC |\ + RWF_DONTCACHE) #define PROCFS_IOCTL_MAGIC 'f' diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index 059b6537f2b7..34810f6ae2b5 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -297,7 +297,7 @@ struct iommu_ioas_unmap { * ioctl(IOMMU_OPTION_HUGE_PAGES) * @IOMMU_OPTION_RLIMIT_MODE: * Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege - * to invoke this. Value 0 (default) is user based accouting, 1 uses process + * to invoke this. Value 0 (default) is user based accounting, 1 uses process * based accounting. Global option, object_id must be 0 * @IOMMU_OPTION_HUGE_PAGES: * Value 1 (default) allows contiguous pages to be combined when generating @@ -390,7 +390,7 @@ struct iommu_vfio_ioas { * @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The * domain can be attached to any PASID on the device. * Any domain attached to the non-PASID part of the - * device must also be flaged, otherwise attaching a + * device must also be flagged, otherwise attaching a * PASID will blocked. * If IOMMU does not support PASID it will return * error (-EOPNOTSUPP). @@ -558,16 +558,25 @@ struct iommu_hw_info_vtd { * For the details of @idr, @iidr and @aidr, please refer to the chapters * from 6.3.1 to 6.3.6 in the SMMUv3 Spec. * - * User space should read the underlying ARM SMMUv3 hardware information for - * the list of supported features. + * This reports the raw HW capability, and not all bits are meaningful to be + * read by userspace. Only the following fields should be used: * - * Note that these values reflect the raw HW capability, without any insight if - * any required kernel driver support is present. Bits may be set indicating the - * HW has functionality that is lacking kernel software support, such as BTM. If - * a VMM is using this information to construct emulated copies of these - * registers it should only forward bits that it knows it can support. + * idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN, CD2L, ASID16, TTF + * idr[1]: SIDSIZE, SSIDSIZE + * idr[3]: BBML, RIL + * idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K * - * In future, presence of required kernel support will be indicated in flags.
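/*
 * Illustrative sketch (not part of the patch): using the RWF_DONTCACHE
 * flag added in the fs.h hunk above from userspace via pwritev2(2).
 * Kernels without the flag reject it with EOPNOTSUPP, hence the
 * plain-write fallback.  The local RWF_DONTCACHE define mirrors the new
 * uapi value for builds against older headers.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE 0x00000080
#endif

static ssize_t write_dontcache(int fd, const void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	ssize_t n = pwritev2(fd, &iov, 1, off, RWF_DONTCACHE);

	if (n < 0 && errno == EOPNOTSUPP)	/* older kernel: drop the flag */
		n = pwritev2(fd, &iov, 1, off, 0);
	return n;
}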
+ * - S1P should be assumed to be true if a NESTED HWPT can be created + * - VFIO/iommufd only support platforms with COHACC, so it should be assumed + * to be true. + * - ATS is a per-device property. If the VMM describes any devices as ATS + * capable in ACPI/DT it should set the corresponding idr. + * + * This list may expand in the future (e.g. E0PD, AIE, PBHA, D128, DS, etc.). It is + * important that VMMs do not read bits outside the list to allow for + * compatibility with future kernels. Several features in the SMMUv3 + * architecture are not currently supported by the kernel for nesting: HTTU, + * BTM, MPAM and others. */ struct iommu_hw_info_arm_smmuv3 { __u32 flags; @@ -766,7 +775,7 @@ struct iommu_hwpt_vtd_s1_invalidate { }; /** - * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cahce invalidation + * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation * (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3) * @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ. * Must be little-endian. diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h index 50589e5dd6a3..84fa8a21dfd0 100644 --- a/include/uapi/linux/mptcp_pm.h +++ b/include/uapi/linux/mptcp_pm.h @@ -12,31 +12,33 @@ /** * enum mptcp_event_type * @MPTCP_EVENT_UNSPEC: unused event - * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6, - * sport, dport A new MPTCP connection has been created. It is the good time - * to allocate memory and send ADD_ADDR if needed. Depending on the + * @MPTCP_EVENT_CREATED: A new MPTCP connection has been created. It is a + * good time to allocate memory and send ADD_ADDR if needed. Depending on the * traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED - * is sent. - * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6, - * sport, dport A MPTCP connection is established (can start new subflows). - * @MPTCP_EVENT_CLOSED: token A MPTCP connection has stopped. - * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A - * new address has been announced by the peer. - * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer. - * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 | - * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new - * subflow has been established. 'error' should not be set. - * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6, - * daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been - * closed. An error (copy of sk_err) could be set if an error has been - * detected for this subflow. - * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6, - * daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a - * subflow has changed. 'error' should not be set. - * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM - * listener is created. - * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener - * is closed. + * is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport, server-side. + * @MPTCP_EVENT_ESTABLISHED: An MPTCP connection is established (can start new + * subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport, server-side. + * @MPTCP_EVENT_CLOSED: An MPTCP connection has stopped. Attribute: token. + * @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer.
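/*
 * Illustrative sketch (not part of the patch): a VMM sanitising the raw
 * idr[] words from struct iommu_hw_info_arm_smmuv3 before exposing them
 * to a guest, following the allow-list documented in the comment above.
 * Only whole-register filtering is shown; real code must additionally
 * mask the individual fields within idr[0]/idr[1]/idr[3]/idr[5] using the
 * SMMUv3 register layouts, and must not forward bits outside the list.
 */
#include <stdbool.h>
#include <linux/iommufd.h>

static void vmm_filter_smmu_idr(struct iommu_hw_info_arm_smmuv3 *info)
{
	static const bool forwardable[6] = {
		[0] = true,	/* ST_LEVEL, TERM_MODEL, STALL_MODEL, ... */
		[1] = true,	/* SIDSIZE, SSIDSIZE */
		[3] = true,	/* BBML, RIL */
		[5] = true,	/* VAX, GRAN64K, GRAN16K, GRAN4K */
	};

	for (int i = 0; i < 6; i++)
		if (!forwardable[i])
			info->idr[i] = 0;
}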
+ * Attributes: token, rem_id, family, daddr4 | daddr6 [, dport]. + * @MPTCP_EVENT_REMOVED: An address has been lost by the peer. Attributes: + * token, rem_id. + * @MPTCP_EVENT_SUB_ESTABLISHED: A new subflow has been established. 'error' + * should not be set. Attributes: token, family, loc_id, rem_id, saddr4 | + * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error]. + * @MPTCP_EVENT_SUB_CLOSED: A subflow has been closed. An error (copy of + * sk_err) could be set if an error has been detected for this subflow. + * Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | + * daddr6, sport, dport, backup, if_idx [, error]. + * @MPTCP_EVENT_SUB_PRIORITY: The priority of a subflow has changed. 'error' + * should not be set. Attributes: token, family, loc_id, rem_id, saddr4 | + * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error]. + * @MPTCP_EVENT_LISTENER_CREATED: A new PM listener is created. Attributes: + * family, sport, saddr4 | saddr6. + * @MPTCP_EVENT_LISTENER_CLOSED: A PM listener is closed. Attributes: family, + * sport, saddr4 | saddr6. */ enum mptcp_event_type { MPTCP_EVENT_UNSPEC, diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index 58154117d9b0..a6fce46aeb37 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -8,6 +8,13 @@ #define __always_inline inline #endif +/* Not all C++ standards support type declarations inside an anonymous union */ +#ifndef __cplusplus +#define __struct_group_tag(TAG) TAG +#else +#define __struct_group_tag(TAG) +#endif + /** * __struct_group() - Create a mirrored named and anonyomous struct * @@ -20,13 +27,13 @@ * and size: one anonymous and one named. The former's members can be used * normally without sub-struct naming, and the latter can be used to * reason about the start, end, and size of the group of struct members. - * The named struct can also be explicitly tagged for layer reuse, as well - * as both having struct attributes appended. + * The named struct can also be explicitly tagged for layer reuse (C only), + * as well as both having struct attributes appended. */ #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ union { \ struct { MEMBERS } ATTRS; \ - struct TAG { MEMBERS } ATTRS NAME; \ + struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \ } ATTRS #ifdef __cplusplus diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h index ba8604bdf206..349718c271eb 100644 --- a/include/uapi/linux/thermal.h +++ b/include/uapi/linux/thermal.h @@ -3,8 +3,8 @@ #define _UAPI_LINUX_THERMAL_H #define THERMAL_NAME_LENGTH 20 -#define THERMAL_THRESHOLD_WAY_UP BIT(0) -#define THERMAL_THRESHOLD_WAY_DOWN BIT(1) +#define THERMAL_THRESHOLD_WAY_UP 0x1 +#define THERMAL_THRESHOLD_WAY_DOWN 0x2 enum thermal_device_mode { THERMAL_DEVICE_DISABLED = 0, diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index d7aca9e61684..74e5b9960c54 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -310,7 +310,9 @@ struct ufs_pwr_mode_info { * to allow variant specific Uni-Pro initialization. * @pwr_change_notify: called before and after a power mode change * is carried out to allow vendor spesific capabilities - * to be set. + * to be set. 
+ * to be set. PRE_CHANGE can modify final_params based + * on desired_pwr_mode, but POST_CHANGE must not alter + * the final_params parameter * @setup_xfer_req: called before any transfer request is issued * to set some things * @setup_task_mgmt: called before any task management request is issued * to set some things @@ -327,7 +329,6 @@ struct ufs_pwr_mode_info { * @program_key: program or evict an inline encryption key * @fill_crypto_prdt: initialize crypto-related fields in the PRDT * @event_notify: called to notify important events - * @reinit_notify: called to notify reinit of UFSHCD during max gear switch * @mcq_config_resource: called to configure MCQ platform resources * @get_hba_mac: reports maximum number of outstanding commands supported by * the controller. Should be implemented for UFSHCI 4.0 or later @@ -353,9 +354,9 @@ struct ufs_hba_variant_ops { int (*link_startup_notify)(struct ufs_hba *, enum ufs_notify_change_status); int (*pwr_change_notify)(struct ufs_hba *, - enum ufs_notify_change_status status, - struct ufs_pa_layer_attr *, - struct ufs_pa_layer_attr *); + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *desired_pwr_mode, + struct ufs_pa_layer_attr *final_params); void (*setup_xfer_req)(struct ufs_hba *hba, int tag, bool is_scsi_cmd); void (*setup_task_mgmt)(struct ufs_hba *, int, u8); @@ -379,7 +380,6 @@ struct ufs_hba_variant_ops { void *prdt, unsigned int num_segments); void (*event_notify)(struct ufs_hba *hba, enum ufs_event_type evt, void *data); - void (*reinit_notify)(struct ufs_hba *); int (*mcq_config_resource)(struct ufs_hba *hba); int (*get_hba_mac)(struct ufs_hba *hba); int (*op_runtime_config)(struct ufs_hba *hba);
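/*
 * Illustrative sketch (not part of the patch): a host driver's
 * pwr_change_notify implementation following the contract documented
 * above: PRE_CHANGE may derive final_params from desired_pwr_mode, while
 * POST_CHANGE must leave final_params untouched.  The "myhba" names are
 * hypothetical; UFS_HS_G4 is the HS gear 4 constant from <ufs/unipro.h>.
 */
#include <linux/errno.h>
#include <ufs/ufshcd.h>
#include <ufs/unipro.h>

static int myhba_pwr_change_notify(struct ufs_hba *hba,
				   enum ufs_notify_change_status status,
				   struct ufs_pa_layer_attr *desired_pwr_mode,
				   struct ufs_pa_layer_attr *final_params)
{
	switch (status) {
	case PRE_CHANGE:
		if (!desired_pwr_mode || !final_params)
			return -EINVAL;
		/* Clamp the requested mode to what this controller supports. */
		*final_params = *desired_pwr_mode;
		if (final_params->gear_rx > UFS_HS_G4)
			final_params->gear_rx = UFS_HS_G4;
		if (final_params->gear_tx > UFS_HS_G4)
			final_params->gear_tx = UFS_HS_G4;
		break;
	case POST_CHANGE:
		/* Apply vendor tuning for the mode now in effect;
		 * final_params must not be modified here.
		 */
		break;
	}
	return 0;
}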