diff options
Diffstat (limited to 'include/linux')
150 files changed, 2211 insertions, 2855 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index bfacb9475aac..67effb91fa98 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -959,6 +959,12 @@ static inline int acpi_table_parse(char *id, return -ENODEV; } +static inline int acpi_get_cpu_uid(unsigned int cpu, u32 *uid) +{ + *uid = cpu; + return 0; +} + static inline int acpi_nvs_register(__u64 start, __u64 size) { return 0; diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index d40ac39bfbe8..02de2ede560f 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -163,9 +163,11 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) { WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n"); } +void alloc_tag_add_early_pfn(unsigned long pfn); #else static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {} static inline void alloc_tag_sub_check(union codetag_ref *ref) {} +static inline void alloc_tag_add_early_pfn(unsigned long pfn) {} #endif /* Caller should verify both ref and tag to be valid */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 50b47eba7d01..e7195750d21b 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -105,6 +105,12 @@ ARM_SMCCC_SMC_32, \ 0, 0x3fff) +/* C1-Pro erratum 4193714: SME DVMSync early acknowledgement */ +#define ARM_SMCCC_CPU_WORKAROUND_4193714 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_CPU, 0x10) + #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ ARM_SMCCC_SMC_32, \ diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h index 4c506e76a808..492ceeb1cdf8 100644 --- a/include/linux/bootmem_info.h +++ b/include/linux/bootmem_info.h @@ -44,10 +44,6 @@ static inline void free_bootmem_page(struct page *page) { enum bootmem_type type = bootmem_type(page); - /* - * The reserve_bootmem_region sets the reserved flag on bootmem - * pages. 
- */ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page); if (type == SECTION_INFO || type == MIX_SECTION_INFO) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0136a108d083..01e203964892 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1541,6 +1541,8 @@ bool bpf_has_frame_pointer(unsigned long ip); int bpf_jit_charge_modmem(u32 size); void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); +bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struct bpf_prog *prog, + int insn_idx); #else static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr, @@ -3723,6 +3725,7 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto; extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; extern const struct bpf_func_proto bpf_sk_setsockopt_proto; extern const struct bpf_func_proto bpf_sk_getsockopt_proto; +extern const struct bpf_func_proto bpf_sk_setsockopt_nodelay_proto; extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto; extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto; extern const struct bpf_func_proto bpf_find_vma_proto; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 53e8664cb566..b148f816f25b 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -630,16 +630,17 @@ struct bpf_insn_aux_data { /* below fields are initialized once */ unsigned int orig_idx; /* original instruction index */ - bool jmp_point; - bool prune_point; + u32 jmp_point:1; + u32 prune_point:1; /* ensure we check state equivalence and save state checkpoint and * this instruction, regardless of any heuristics */ - bool force_checkpoint; + u32 force_checkpoint:1; /* true if instruction is a call to a helper function that * accepts callback function as a parameter. 
*/ - bool calls_callback; + u32 calls_callback:1; + u32 indirect_target:1; /* if it is an indirect jump target */ /* * CFG strongly connected component this instruction belongs to, * zero if it is a singleton SCC. diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index b907e6c2307d..260d7968cf72 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -108,6 +108,7 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, unsigned int clearing); +extern void cdrom_probe_write_features(struct cdrom_device_info *cdi); extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi); extern void unregister_cdrom(struct cdrom_device_info *cdi); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index f42563739d2e..50a784da7a81 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -611,8 +611,8 @@ struct cgroup { /* used to wait for offlining of csses */ wait_queue_head_t offline_waitq; - /* used by cgroup_rmdir() to wait for dying tasks to leave */ - wait_queue_head_t dying_populated_waitq; + /* defers killing csses after removal until cgroup is depopulated */ + struct work_struct finish_destroy_work; /* used to schedule release agent */ struct work_struct release_agent_work; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e52160e85af4..f6d037a30fd8 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -53,6 +53,7 @@ struct kernel_clone_args; enum css_task_iter_flags { CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */ CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */ + CSS_TASK_ITER_WITH_DEAD = (1U << 2), /* include exiting tasks */ CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */ }; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 630705a47129..b01a38fef8cf 100644 --- 
a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -136,10 +136,6 @@ struct clk_duty { * 0. Returns the calculated rate. Optional, but recommended - if * this op is not set then clock rate will be initialized to 0. * - * @round_rate: Given a target rate as input, returns the closest rate actually - * supported by the clock. The parent rate is an input/output - * parameter. - * * @determine_rate: Given a target rate as input, returns the closest rate * actually supported by the clock, and optionally the parent clock * that should be used to provide the clock rate. @@ -163,13 +159,13 @@ struct clk_duty { * * @set_rate: Change the rate of this clock. The requested rate is specified * by the second argument, which should typically be the return - * of .round_rate call. The third argument gives the parent rate - * which is likely helpful for most .set_rate implementation. + * of .determine_rate call. The third argument gives the parent + * rate which is likely helpful for most .set_rate implementation. * Returns 0 on success, -EERROR otherwise. * * @set_rate_and_parent: Change the rate and the parent of this clock. The * requested rate is specified by the second argument, which - * should typically be the return of .round_rate call. The + * should typically be the return of clk_round_rate() call. The * third argument gives the parent rate which is likely helpful * for most .set_rate_and_parent implementation. The fourth * argument gives the parent index. 
This callback is optional (and @@ -244,8 +240,6 @@ struct clk_ops { void (*restore_context)(struct clk_hw *hw); unsigned long (*recalc_rate)(struct clk_hw *hw, unsigned long parent_rate); - long (*round_rate)(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate); int (*determine_rate)(struct clk_hw *hw, struct clk_rate_request *req); int (*set_parent)(struct clk_hw *hw, u8 index); @@ -679,7 +673,7 @@ struct clk_div_table { * @lock: register lock * * Clock with an adjustable divider affecting its output frequency. Implements - * .recalc_rate, .set_rate and .round_rate + * .recalc_rate, .set_rate and .determine_rate * * @flags: * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the @@ -739,14 +733,6 @@ extern const struct clk_ops clk_divider_ro_ops; unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, unsigned int val, const struct clk_div_table *table, unsigned long flags, unsigned long width); -long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, - unsigned long rate, unsigned long *prate, - const struct clk_div_table *table, - u8 width, unsigned long flags); -long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, - unsigned long rate, unsigned long *prate, - const struct clk_div_table *table, u8 width, - unsigned long flags, unsigned int val); int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req, const struct clk_div_table *table, u8 width, unsigned long flags); @@ -948,6 +934,26 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, (shift), (width), (clk_divider_flags), \ NULL, (lock)) /** + * devm_clk_hw_register_divider_parent_data - register a divider clock with the + * clock framework + * @dev: device registering this clock + * @name: name of this clock + * @parent_data: parent clk data + * @flags: framework-specific flags + * @reg: register address to adjust divider + * @shift: number of bits to shift the 
bitfield + * @width: width of the bitfield + * @clk_divider_flags: divider-specific flags for this clock + * @lock: shared register lock for this clock + */ +#define devm_clk_hw_register_divider_parent_data(dev, name, parent_data, \ + flags, reg, shift, width, \ + clk_divider_flags, lock) \ + __devm_clk_hw_register_divider((dev), NULL, (name), NULL, NULL, \ + (parent_data), (flags), (reg), (shift), \ + (width), (clk_divider_flags), NULL, \ + (lock)) +/** * devm_clk_hw_register_divider_table - register a table based divider clock * with the clock framework (devres variant) * @dev: device registering this clock @@ -1126,7 +1132,7 @@ void of_fixed_factor_clk_setup(struct device_node *node); * * Clock with a fixed multiplier and divider. The output frequency is the * parent clock rate divided by div and multiplied by mult. - * Implements .recalc_rate, .set_rate, .round_rate and .recalc_accuracy + * Implements .recalc_rate, .set_rate, .determine_rate and .recalc_accuracy * * Flags: * * CLK_FIXED_FACTOR_FIXED_ACCURACY - Use the value in @acc instead of the @@ -1254,7 +1260,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw); * @lock: register lock * * Clock with an adjustable multiplier affecting its output frequency. 
- * Implements .recalc_rate, .set_rate and .round_rate + * Implements .recalc_rate, .set_rate and .determine_rate * * @flags: * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read @@ -1437,26 +1443,6 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) dst->core = src->core; } -static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate, - const struct clk_div_table *table, - u8 width, unsigned long flags) -{ - return divider_round_rate_parent(hw, clk_hw_get_parent(hw), - rate, prate, table, width, flags); -} - -static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate, - const struct clk_div_table *table, - u8 width, unsigned long flags, - unsigned int val) -{ - return divider_ro_round_rate_parent(hw, clk_hw_get_parent(hw), - rate, prate, table, width, flags, - val); -} - /* * FIXME clock api without lock protection */ diff --git a/include/linux/comedi/comedi_isadma.h b/include/linux/comedi/comedi_isadma.h index 9d2b12db7e6e..7514ce222fa6 100644 --- a/include/linux/comedi/comedi_isadma.h +++ b/include/linux/comedi/comedi_isadma.h @@ -48,11 +48,11 @@ struct comedi_isadma_desc { */ struct comedi_isadma { struct device *dev; - struct comedi_isadma_desc *desc; int n_desc; int cur_dma; unsigned int chan; unsigned int chan2; + struct comedi_isadma_desc desc[] __counted_by(n_desc); }; #if IS_ENABLED(CONFIG_ISA_DMA_API) diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h index 35fdc41845ce..577a08f37aee 100644 --- a/include/linux/comedi/comedidev.h +++ b/include/linux/comedi/comedidev.h @@ -1026,10 +1026,55 @@ int comedi_load_firmware(struct comedi_device *dev, struct device *hw_dev, unsigned long context), unsigned long context); -int __comedi_request_region(struct comedi_device *dev, - unsigned long start, unsigned long len); -int comedi_request_region(struct comedi_device *dev, - unsigned long start, unsigned 
long len); +int __comedi_check_request_region(struct comedi_device *dev, + unsigned long start, unsigned long len, + unsigned long minstart, unsigned long maxend, + unsigned long minalign); +int comedi_check_request_region(struct comedi_device *dev, + unsigned long start, unsigned long len, + unsigned long minstart, unsigned long maxend, + unsigned long minalign); + +/** + * __comedi_request_region() - Request an I/O region for a legacy driver + * @dev: COMEDI device. + * @start: Base address of the I/O region. + * @len: Length of the I/O region. + * + * Requests the specified I/O port region which must start at a non-zero + * address. + * + * Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request + * fails. + */ +static inline int __comedi_request_region(struct comedi_device *dev, + unsigned long start, + unsigned long len) +{ + return __comedi_check_request_region(dev, start, len, 0, ~0ul, 1); +} + +/** + * comedi_request_region() - Request an I/O region for a legacy driver + * @dev: COMEDI device. + * @start: Base address of the I/O region. + * @len: Length of the I/O region. + * + * Requests the specified I/O port region which must start at a non-zero + * address. + * + * On success, @dev->iobase is set to the base address of the region and + * @dev->iolen is set to its length. + * + * Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request + * fails. 
+ */ +static inline int comedi_request_region(struct comedi_device *dev, + unsigned long start, unsigned long len) +{ + return comedi_check_request_region(dev, start, len, 0, ~0ul, 1); +} + void comedi_legacy_detach(struct comedi_device *dev); int comedi_auto_config(struct device *hardware_device, diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 2b48be97fcd0..2131febebee9 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -306,24 +306,19 @@ struct coresight_device { * coresight_dev_list - Mapping for devices to "name" index for device * names. * + * @node: Node on the global device index list. * @nr_idx: Number of entries already allocated. * @pfx: Prefix pattern for device name. * @fwnode_list: Array of fwnode_handles associated with each allocated * index, upto nr_idx entries. */ struct coresight_dev_list { + struct list_head node; int nr_idx; - const char *pfx; + char *pfx; struct fwnode_handle **fwnode_list; }; -#define DEFINE_CORESIGHT_DEVLIST(var, dev_pfx) \ -static struct coresight_dev_list (var) = { \ - .pfx = dev_pfx, \ - .nr_idx = 0, \ - .fwnode_list = NULL, \ -} - #define to_coresight_device(d) container_of(d, struct coresight_device, dev) /** @@ -663,8 +658,7 @@ void coresight_clear_self_claim_tag(struct csdev_access *csa); void coresight_clear_self_claim_tag_unlocked(struct csdev_access *csa); void coresight_disclaim_device(struct coresight_device *csdev); void coresight_disclaim_device_unlocked(struct coresight_device *csdev); -char *coresight_alloc_device_name(struct coresight_dev_list *devs, - struct device *dev); +char *coresight_alloc_device_name(const char *prefix, struct device *dev); bool coresight_loses_context_with_cpu(struct device *dev); diff --git a/include/linux/damon.h b/include/linux/damon.h index d9a3babbafc1..f2cdb7c3f5e6 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -818,9 +818,11 @@ struct damon_ctx { /* lists of &struct damon_call_control */ struct list_head 
call_controls; + bool call_controls_obsolete; struct mutex call_controls_lock; struct damos_walk_control *walk_control; + bool walk_control_obsolete; struct mutex walk_control_lock; /* diff --git a/include/linux/dax.h b/include/linux/dax.h index 10a7cc79aea5..fe6c3ded1b50 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -54,6 +54,7 @@ struct dax_device *alloc_dax(void *private, const struct dax_operations *ops); void *dax_holder(struct dax_device *dax_dev); void put_dax(struct dax_device *dax_dev); void kill_dax(struct dax_device *dax_dev); +struct dax_device *dax_dev_get(dev_t devt); void dax_write_cache(struct dax_device *dax_dev, bool wc); bool dax_write_cache_enabled(struct dax_device *dax_dev); bool dax_synchronous(struct dax_device *dax_dev); @@ -130,7 +131,6 @@ int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk); void dax_remove_host(struct gendisk *disk); struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off, void *holder, const struct dax_holder_operations *ops); -void fs_put_dax(struct dax_device *dax_dev, void *holder); #else static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk) { @@ -145,14 +145,15 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, { return NULL; } -static inline void fs_put_dax(struct dax_device *dax_dev, void *holder) -{ -} #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */ #if IS_ENABLED(CONFIG_FS_DAX) +void fs_put_dax(struct dax_device *dax_dev, void *holder); +int fs_dax_get(struct dax_device *dax_dev, void *holder, + const struct dax_holder_operations *hops); int dax_writeback_mapping_range(struct address_space *mapping, struct dax_device *dax_dev, struct writeback_control *wbc); +int dax_folio_reset_order(struct folio *folio); struct page *dax_layout_busy_page(struct address_space *mapping); struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); @@ -163,6 +164,15 @@ dax_entry_t 
dax_lock_mapping_entry(struct address_space *mapping, void dax_unlock_mapping_entry(struct address_space *mapping, unsigned long index, dax_entry_t cookie); #else +static inline void fs_put_dax(struct dax_device *dax_dev, void *holder) +{ +} + +static inline int fs_dax_get(struct dax_device *dax_dev, void *holder, + const struct dax_holder_operations *hops) +{ + return -EOPNOTSUPP; +} static inline struct page *dax_layout_busy_page(struct address_space *mapping) { return NULL; @@ -242,6 +252,7 @@ static inline void dax_break_layout_final(struct inode *inode) bool dax_alive(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); +int dax_set_ops(struct dax_device *dax_dev, const struct dax_operations *ops); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, unsigned long *pfn); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c5bd5a74baba..2577c05f84ec 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -88,6 +88,7 @@ union shortname_store { #define d_lock d_lockref.lock #define d_iname d_shortname.string +struct completion_list; struct dentry { /* RCU lookup touched fields */ @@ -122,13 +123,24 @@ struct dentry { struct hlist_node d_sib; /* child of parent list */ struct hlist_head d_children; /* our children */ /* - * d_alias and d_rcu can share memory + * the following members can share memory - their uses are + * mutually exclusive. 
*/ union { - struct hlist_node d_alias; /* inode alias list */ - struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ + /* positives: inode alias list */ + struct hlist_node d_alias; + /* in-lookup ones (all negative, live): hash chain */ + struct hlist_bl_node d_in_lookup_hash; + /* killed ones: (already negative) used to schedule freeing */ struct rcu_head d_rcu; - } d_u; + /* + * live non-in-lookup negatives: used if shrink_dcache_tree() + * races with eviction by another thread and needs to wait for + * this dentry to get killed . Remains NULL for almost all + * negative dentries. + */ + struct completion_list *waiters; + }; }; /* @@ -616,4 +628,8 @@ void set_default_d_op(struct super_block *, const struct dentry_operations *); struct dentry *d_make_persistent(struct dentry *, struct inode *); void d_make_discardable(struct dentry *dentry); +/* inode->i_lock must be held over that */ +#define for_each_alias(dentry, inode) \ + hlist_for_each_entry(dentry, &(inode)->i_dentry, d_alias) + #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/device.h b/include/linux/device.h index 67cec9ec0cd0..9c8fde6a3d86 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -505,6 +505,22 @@ struct device_physical_location { }; /** + * enum struct_device_flags - Flags in struct device + * + * Each flag should have a set of accessor functions created via + * __create_dev_flag_accessors() for each access. + * + * @DEV_FLAG_READY_TO_PROBE: If set then device_add() has finished enough + * initialization that probe could be called. + * @DEV_FLAG_COUNT: Number of defined struct_device_flags. + */ +enum struct_device_flags { + DEV_FLAG_READY_TO_PROBE = 0, + + DEV_FLAG_COUNT +}; + +/** * struct device - The basic device structure * @parent: The device's "parent" device, the device to which it is attached. 
* In most cases, a parent device is some sort of bus or host @@ -599,6 +615,7 @@ struct device_physical_location { * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers. * @dma_iommu: Device is using default IOMMU implementation for DMA and * doesn't rely on dma_ops structure. + * @flags: DEV_FLAG_XXX flags. Use atomic bitfield operations to modify. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. The device structure contains the information @@ -721,8 +738,36 @@ struct device { #ifdef CONFIG_IOMMU_DMA bool dma_iommu:1; #endif + + DECLARE_BITMAP(flags, DEV_FLAG_COUNT); }; +#define __create_dev_flag_accessors(accessor_name, flag_name) \ +static inline bool dev_##accessor_name(const struct device *dev) \ +{ \ + return test_bit(flag_name, dev->flags); \ +} \ +static inline void dev_set_##accessor_name(struct device *dev) \ +{ \ + set_bit(flag_name, dev->flags); \ +} \ +static inline void dev_clear_##accessor_name(struct device *dev) \ +{ \ + clear_bit(flag_name, dev->flags); \ +} \ +static inline void dev_assign_##accessor_name(struct device *dev, bool value) \ +{ \ + assign_bit(flag_name, dev->flags, value); \ +} \ +static inline bool dev_test_and_set_##accessor_name(struct device *dev) \ +{ \ + return test_and_set_bit(flag_name, dev->flags); \ +} + +__create_dev_flag_accessors(ready_to_probe, DEV_FLAG_READY_TO_PROBE); + +#undef __create_dev_flag_accessors + /** * struct device_link - Device link representation. * @supplier: The device on the supplier end of the link. diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 166933b82e27..d1203da56fc5 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -322,13 +322,13 @@ struct dma_buf { * @vmapping_counter: * * Used internally to refcnt the vmaps returned by dma_buf_vmap(). - * Protected by @lock. + * Protected by @resv. 
*/ unsigned vmapping_counter; /** * @vmap_ptr: - * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. + * The current vmap ptr if @vmapping_counter > 0. Protected by @resv. */ struct iosys_map vmap_ptr; diff --git a/include/linux/dpll.h b/include/linux/dpll.h index b7277a8b484d..f8037f1ab20b 100644 --- a/include/linux/dpll.h +++ b/include/linux/dpll.h @@ -286,6 +286,7 @@ int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin, int dpll_device_change_ntf(struct dpll_device *dpll); +int __dpll_pin_change_ntf(struct dpll_pin *pin); int dpll_pin_change_ntf(struct dpll_pin *pin); int register_dpll_notifier(struct notifier_block *nb); diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index e04d67e999a1..416a3352261f 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -20,31 +20,21 @@ /* * SYSCALL_WORK flags handled in syscall_enter_from_user_mode() */ -#ifndef ARCH_SYSCALL_WORK_ENTER -# define ARCH_SYSCALL_WORK_ENTER (0) -#endif - -/* - * SYSCALL_WORK flags handled in syscall_exit_to_user_mode() - */ -#ifndef ARCH_SYSCALL_WORK_EXIT -# define ARCH_SYSCALL_WORK_EXIT (0) -#endif - #define SYSCALL_WORK_ENTER (SYSCALL_WORK_SECCOMP | \ SYSCALL_WORK_SYSCALL_TRACEPOINT | \ SYSCALL_WORK_SYSCALL_TRACE | \ SYSCALL_WORK_SYSCALL_EMU | \ SYSCALL_WORK_SYSCALL_AUDIT | \ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \ - SYSCALL_WORK_SYSCALL_RSEQ_SLICE | \ - ARCH_SYSCALL_WORK_ENTER) + SYSCALL_WORK_SYSCALL_RSEQ_SLICE) +/* + * SYSCALL_WORK flags handled in syscall_exit_to_user_mode() + */ #define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \ SYSCALL_WORK_SYSCALL_TRACE | \ SYSCALL_WORK_SYSCALL_AUDIT | \ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \ - SYSCALL_WORK_SYSCALL_EXIT_TRAP | \ - ARCH_SYSCALL_WORK_EXIT) + SYSCALL_WORK_SYSCALL_EXIT_TRAP) /** * arch_ptrace_report_syscall_entry - Architecture specific ptrace_report_syscall_entry() wrapper diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 
ea9ca0e4172a..728fb5dee5ed 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -39,12 +39,16 @@ static inline void eventpoll_release(struct file *file) { /* - * Fast check to avoid the get/release of the semaphore. Since - * we're doing this outside the semaphore lock, it might return - * false negatives, but we don't care. It'll help in 99.99% of cases - * to avoid the semaphore lock. False positives simply cannot happen - * because the file in on the way to be removed and nobody ( but - * eventpoll ) has still a reference to this file. + * Fast check to skip the slow path in the common case where the + * file was never attached to an epoll. Safe without file->f_lock + * because every f_ep writer excludes a concurrent __fput() on + * @file: + * - ep_insert() requires the file alive (refcount > 0); + * - ep_remove() holds @file pinned via epi_fget() across the + * write; + * - eventpoll_release_file() runs from __fput() itself. + * We are in __fput() here, so none of those can race us: a NULL + * observation truly means no epoll path has work left on @file. 
*/ if (likely(!READ_ONCE(file->f_ep))) return; diff --git a/include/linux/evm.h b/include/linux/evm.h index ddece4a6b25d..913f4573b203 100644 --- a/include/linux/evm.h +++ b/include/linux/evm.h @@ -18,6 +18,8 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry, const char *xattr_name, void *xattr_value, size_t xattr_value_len); +int evm_fix_hmac(struct dentry *dentry, const char *xattr_name, + const char *xattr_value, size_t xattr_value_len); int evm_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, struct xattr *xattrs, int *xattr_count); @@ -51,6 +53,12 @@ static inline enum integrity_status evm_verifyxattr(struct dentry *dentry, { return INTEGRITY_UNKNOWN; } + +static inline int evm_fix_hmac(struct dentry *dentry, const char *xattr_name, + const char *xattr_value, size_t xattr_value_len) +{ + return -EOPNOTSUPP; +} #endif static inline int evm_inode_init_security(struct inode *inode, struct inode *dir, diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index dc41722fcc9d..829a59399dac 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -80,6 +80,9 @@ enum stop_cp_reason { STOP_CP_REASON_NO_SEGMENT, STOP_CP_REASON_CORRUPTED_FREE_BITMAP, STOP_CP_REASON_CORRUPTED_NID, + STOP_CP_REASON_READ_META, + STOP_CP_REASON_READ_NODE, + STOP_CP_REASON_READ_DATA, STOP_CP_REASON_MAX, }; diff --git a/include/linux/filelock.h b/include/linux/filelock.h index d2c9740e26a8..5f0a2fb31450 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -50,6 +50,7 @@ struct lease_manager_operations { void (*lm_setup)(struct file_lease *, void **); bool (*lm_breaker_owns_lease)(struct file_lease *); int (*lm_open_conflict)(struct file *, int); + bool (*lm_breaker_timedout)(struct file_lease *fl); }; struct lock_manager { diff --git a/include/linux/filter.h b/include/linux/filter.h index f552170eacf4..1ec6d5ba64cc 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1108,6 +1108,8 
@@ sk_filter_reason(struct sock *sk, struct sk_buff *skb) return sk_filter_trim_cap(sk, skb, 1); } +struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp, + int *err); struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); @@ -1153,7 +1155,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ (void *)__bpf_call_base) -struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); +struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_jit_inlines_helper_call(s32 imm); @@ -1184,6 +1186,31 @@ static inline bool bpf_dump_raw_ok(const struct cred *cred) struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); + +#ifdef CONFIG_BPF_SYSCALL +struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, + const struct bpf_insn *patch, u32 len); +struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env); +void bpf_restore_insn_aux_data(struct bpf_verifier_env *env, + struct bpf_insn_aux_data *orig_insn_aux); +#else +static inline struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, + const struct bpf_insn *patch, u32 len) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env) +{ + return NULL; +} + +static inline void bpf_restore_insn_aux_data(struct bpf_verifier_env *env, + struct bpf_insn_aux_data *orig_insn_aux) +{ +} +#endif /* CONFIG_BPF_SYSCALL */ + int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); static inline bool xdp_return_frame_no_direct(void) @@ -1310,9 +1337,14 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog, const char *bpf_jit_get_prog_name(struct bpf_prog *prog); 
-struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); +struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog); void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); +static inline bool bpf_prog_need_blind(const struct bpf_prog *prog) +{ + return prog->blinding_requested && !prog->blinded; +} + static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { @@ -1451,6 +1483,20 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) { } +static inline bool bpf_prog_need_blind(const struct bpf_prog *prog) +{ + return false; +} + +static inline +struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog) +{ + return prog; +} + +static inline void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) +{ +} #endif /* CONFIG_BPF_JIT */ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 0a3bcd1718f3..be1b38c981d4 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -94,6 +94,7 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num); int register_fprobe_syms(struct fprobe *fp, const char **syms, int num); int unregister_fprobe(struct fprobe *fp); +int unregister_fprobe_async(struct fprobe *fp); bool fprobe_is_registered(struct fprobe *fp); int fprobe_count_ips_from_filter(const char *filter, const char *notfilter); #else @@ -113,6 +114,10 @@ static inline int unregister_fprobe(struct fprobe *fp) { return -EOPNOTSUPP; } +static inline int unregister_fprobe_async(struct fprobe *fp) +{ + return -EOPNOTSUPP; +} static inline bool fprobe_is_registered(struct fprobe *fp) { return false; diff --git a/include/linux/fs.h b/include/linux/fs.h index e1d257e6da68..11559c513dfb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h 
@@ -2062,20 +2062,13 @@ void compat_set_desc_from_vma(struct vm_area_desc *desc, const struct file *file const struct vm_area_struct *vma); int __compat_vma_mmap(struct vm_area_desc *desc, struct vm_area_struct *vma); int compat_vma_mmap(struct file *file, struct vm_area_struct *vma); -int __vma_check_mmap_hook(struct vm_area_struct *vma); static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma) { - int err; - if (file->f_op->mmap_prepare) return compat_vma_mmap(file, vma); - err = file->f_op->mmap(file, vma); - if (err) - return err; - - return __vma_check_mmap_hook(vma); + return file->f_op->mmap(file, vma); } static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc) diff --git a/include/linux/fsl/ntmp.h b/include/linux/fsl/ntmp.h index 916dc4fe7de3..83a449b4d6ec 100644 --- a/include/linux/fsl/ntmp.h +++ b/include/linux/fsl/ntmp.h @@ -31,6 +31,12 @@ struct netc_tbl_vers { u8 rsst_ver; }; +struct netc_swcbd { + void *buf; + dma_addr_t dma; + size_t size; +}; + struct netc_cbdr { struct device *dev; struct netc_cbdr_regs regs; @@ -44,9 +50,10 @@ struct netc_cbdr { void *addr_base_align; dma_addr_t dma_base; dma_addr_t dma_base_align; + struct netc_swcbd *swcbd; /* Serialize the order of command BD ring */ - spinlock_t ring_lock; + struct mutex ring_lock; }; struct ntmp_user { diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 95985400d3d8..e5cde39d6e85 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -915,6 +915,7 @@ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int obj_type); extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); +struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); 
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h deleted file mode 100644 index 5d70c3f98f5b..000000000000 --- a/include/linux/hdlcdrv.h +++ /dev/null @@ -1,276 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * hdlcdrv.h -- HDLC packet radio network driver. - * The Linux soundcard driver for 1200 baud and 9600 baud packet radio - * (C) 1996-1998 by Thomas Sailer, HB9JNX/AE4WA - */ -#ifndef _HDLCDRV_H -#define _HDLCDRV_H - - -#include <linux/netdevice.h> -#include <linux/if.h> -#include <linux/spinlock.h> -#include <uapi/linux/hdlcdrv.h> - -#define HDLCDRV_MAGIC 0x5ac6e778 -#define HDLCDRV_HDLCBUFFER 32 /* should be a power of 2 for speed reasons */ -#define HDLCDRV_BITBUFFER 256 /* should be a power of 2 for speed reasons */ -#undef HDLCDRV_LOOPBACK /* define for HDLC debugging purposes */ -#define HDLCDRV_DEBUG - -/* maximum packet length, excluding CRC */ -#define HDLCDRV_MAXFLEN 400 - - -struct hdlcdrv_hdlcbuffer { - spinlock_t lock; - unsigned rd, wr; - unsigned short buf[HDLCDRV_HDLCBUFFER]; -}; - -#ifdef HDLCDRV_DEBUG -struct hdlcdrv_bitbuffer { - unsigned int rd; - unsigned int wr; - unsigned int shreg; - unsigned char buffer[HDLCDRV_BITBUFFER]; -}; - -static inline void hdlcdrv_add_bitbuffer(struct hdlcdrv_bitbuffer *buf, - unsigned int bit) -{ - unsigned char new; - - new = buf->shreg & 1; - buf->shreg >>= 1; - buf->shreg |= (!!bit) << 7; - if (new) { - buf->buffer[buf->wr] = buf->shreg; - buf->wr = (buf->wr+1) % sizeof(buf->buffer); - buf->shreg = 0x80; - } -} - -static inline void hdlcdrv_add_bitbuffer_word(struct hdlcdrv_bitbuffer *buf, - unsigned int bits) -{ - buf->buffer[buf->wr] = bits & 0xff; - buf->wr = (buf->wr+1) % sizeof(buf->buffer); - buf->buffer[buf->wr] = (bits >> 8) & 0xff; - buf->wr = (buf->wr+1) % sizeof(buf->buffer); - -} -#endif /* HDLCDRV_DEBUG */ - -/* -------------------------------------------------------------------- */ -/* - * Information that need to be kept for each driver. 
- */ - -struct hdlcdrv_ops { - /* - * first some informations needed by the hdlcdrv routines - */ - const char *drvname; - const char *drvinfo; - /* - * the routines called by the hdlcdrv routines - */ - int (*open)(struct net_device *); - int (*close)(struct net_device *); - int (*ioctl)(struct net_device *, void __user *, - struct hdlcdrv_ioctl *, int); -}; - -struct hdlcdrv_state { - int magic; - int opened; - - const struct hdlcdrv_ops *ops; - - struct { - int bitrate; - } par; - - struct hdlcdrv_pttoutput { - int dma2; - int seriobase; - int pariobase; - int midiiobase; - unsigned int flags; - } ptt_out; - - struct hdlcdrv_channel_params ch_params; - - struct hdlcdrv_hdlcrx { - struct hdlcdrv_hdlcbuffer hbuf; - unsigned long in_hdlc_rx; - /* 0 = sync hunt, != 0 receiving */ - int rx_state; - unsigned int bitstream; - unsigned int bitbuf; - int numbits; - unsigned char dcd; - - int len; - unsigned char *bp; - unsigned char buffer[HDLCDRV_MAXFLEN+2]; - } hdlcrx; - - struct hdlcdrv_hdlctx { - struct hdlcdrv_hdlcbuffer hbuf; - unsigned long in_hdlc_tx; - /* - * 0 = send flags - * 1 = send txtail (flags) - * 2 = send packet - */ - int tx_state; - int numflags; - unsigned int bitstream; - unsigned char ptt; - int calibrate; - int slotcnt; - - unsigned int bitbuf; - int numbits; - - int len; - unsigned char *bp; - unsigned char buffer[HDLCDRV_MAXFLEN+2]; - } hdlctx; - -#ifdef HDLCDRV_DEBUG - struct hdlcdrv_bitbuffer bitbuf_channel; - struct hdlcdrv_bitbuffer bitbuf_hdlc; -#endif /* HDLCDRV_DEBUG */ - - int ptt_keyed; - - /* queued skb for transmission */ - struct sk_buff *skb; -}; - - -/* -------------------------------------------------------------------- */ - -static inline int hdlcdrv_hbuf_full(struct hdlcdrv_hdlcbuffer *hb) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&hb->lock, flags); - ret = !((HDLCDRV_HDLCBUFFER - 1 + hb->rd - hb->wr) % HDLCDRV_HDLCBUFFER); - spin_unlock_irqrestore(&hb->lock, flags); - return ret; -} - -/* 
-------------------------------------------------------------------- */ - -static inline int hdlcdrv_hbuf_empty(struct hdlcdrv_hdlcbuffer *hb) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&hb->lock, flags); - ret = (hb->rd == hb->wr); - spin_unlock_irqrestore(&hb->lock, flags); - return ret; -} - -/* -------------------------------------------------------------------- */ - -static inline unsigned short hdlcdrv_hbuf_get(struct hdlcdrv_hdlcbuffer *hb) -{ - unsigned long flags; - unsigned short val; - unsigned newr; - - spin_lock_irqsave(&hb->lock, flags); - if (hb->rd == hb->wr) - val = 0; - else { - newr = (hb->rd+1) % HDLCDRV_HDLCBUFFER; - val = hb->buf[hb->rd]; - hb->rd = newr; - } - spin_unlock_irqrestore(&hb->lock, flags); - return val; -} - -/* -------------------------------------------------------------------- */ - -static inline void hdlcdrv_hbuf_put(struct hdlcdrv_hdlcbuffer *hb, - unsigned short val) -{ - unsigned newp; - unsigned long flags; - - spin_lock_irqsave(&hb->lock, flags); - newp = (hb->wr+1) % HDLCDRV_HDLCBUFFER; - if (newp != hb->rd) { - hb->buf[hb->wr] = val & 0xffff; - hb->wr = newp; - } - spin_unlock_irqrestore(&hb->lock, flags); -} - -/* -------------------------------------------------------------------- */ - -static inline void hdlcdrv_putbits(struct hdlcdrv_state *s, unsigned int bits) -{ - hdlcdrv_hbuf_put(&s->hdlcrx.hbuf, bits); -} - -static inline unsigned int hdlcdrv_getbits(struct hdlcdrv_state *s) -{ - unsigned int ret; - - if (hdlcdrv_hbuf_empty(&s->hdlctx.hbuf)) { - if (s->hdlctx.calibrate > 0) - s->hdlctx.calibrate--; - else - s->hdlctx.ptt = 0; - ret = 0; - } else - ret = hdlcdrv_hbuf_get(&s->hdlctx.hbuf); -#ifdef HDLCDRV_LOOPBACK - hdlcdrv_hbuf_put(&s->hdlcrx.hbuf, ret); -#endif /* HDLCDRV_LOOPBACK */ - return ret; -} - -static inline void hdlcdrv_channelbit(struct hdlcdrv_state *s, unsigned int bit) -{ -#ifdef HDLCDRV_DEBUG - hdlcdrv_add_bitbuffer(&s->bitbuf_channel, bit); -#endif /* HDLCDRV_DEBUG */ -} - 
-static inline void hdlcdrv_setdcd(struct hdlcdrv_state *s, int dcd) -{ - s->hdlcrx.dcd = !!dcd; -} - -static inline int hdlcdrv_ptt(struct hdlcdrv_state *s) -{ - return s->hdlctx.ptt || (s->hdlctx.calibrate > 0); -} - -/* -------------------------------------------------------------------- */ - -void hdlcdrv_receiver(struct net_device *, struct hdlcdrv_state *); -void hdlcdrv_transmitter(struct net_device *, struct hdlcdrv_state *); -void hdlcdrv_arbitrate(struct net_device *, struct hdlcdrv_state *); -struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops, - unsigned int privsize, const char *ifname, - unsigned int baseaddr, unsigned int irq, - unsigned int dma); -void hdlcdrv_unregister(struct net_device *dev); - -/* -------------------------------------------------------------------- */ - - - -#endif /* _HDLCDRV_H */ diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 6ca92bff02c6..ea6bef9b6012 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -271,7 +271,7 @@ struct hsi_controller { struct module *owner; unsigned int id; unsigned int num_ports; - struct hsi_port **port; + struct hsi_port *port[] __counted_by(num_ports); }; #define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device) diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h index f35b42e8c5de..74b91244fe0e 100644 --- a/include/linux/hwspinlock.h +++ b/include/linux/hwspinlock.h @@ -25,34 +25,6 @@ struct hwspinlock; struct hwspinlock_device; struct hwspinlock_ops; -/** - * struct hwspinlock_pdata - platform data for hwspinlock drivers - * @base_id: base id for this hwspinlock device - * - * hwspinlock devices provide system-wide hardware locks that are used - * by remote processors that have no other way to achieve synchronization. 
- * - * To achieve that, each physical lock must have a system-wide id number - * that is agreed upon, otherwise remote processors can't possibly assume - * they're using the same hardware lock. - * - * Usually boards have a single hwspinlock device, which provides several - * hwspinlocks, and in this case, they can be trivially numbered 0 to - * (num-of-locks - 1). - * - * In case boards have several hwspinlocks devices, a different base id - * should be used for each hwspinlock device (they can't all use 0 as - * a starting id!). - * - * This platform data structure should be used to provide the base id - * for each device (which is trivially 0 when only a single hwspinlock - * device exists). It can be shared between different platforms, hence - * its location. - */ -struct hwspinlock_pdata { - int base_id; -}; - #ifdef CONFIG_HWSPINLOCK int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index a26fb8e7cedf..964f1be8150c 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1304,6 +1304,8 @@ static inline void *hv_get_drvdata(struct hv_device *dev) struct device *hv_get_vmbus_root_device(void); +bool hv_vmbus_exists(void); + struct hv_ring_buffer_debug_info { u32 current_interrupt_mask; u32 current_read_index; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index e6272f9c5e42..20cc16ea4e5a 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -147,11 +147,13 @@ extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); * @priority: skb priority * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000 * @next: pointer to next struct + * @rcu: used for deferred freeing of mapping nodes */ struct vlan_priority_tci_mapping { u32 priority; u16 vlan_qos; - struct vlan_priority_tci_mapping *next; + struct vlan_priority_tci_mapping __rcu *next; + struct rcu_head rcu; }; struct proc_dir_entry; @@ -177,7 +179,7 @@ struct vlan_dev_priv 
{ unsigned int nr_ingress_mappings; u32 ingress_priority_map[8]; unsigned int nr_egress_mappings; - struct vlan_priority_tci_mapping *egress_priority_map[16]; + struct vlan_priority_tci_mapping __rcu *egress_priority_map[16]; __be16 vlan_proto; u16 vlan_id; @@ -209,19 +211,24 @@ static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) { struct vlan_priority_tci_mapping *mp; + u16 vlan_qos = 0; - smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */ + rcu_read_lock(); - mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)]; + mp = rcu_dereference(vlan_dev_priv(dev)->egress_priority_map[skprio & 0xF]); while (mp) { if (mp->priority == skprio) { - return mp->vlan_qos; /* This should already be shifted - * to mask correctly with the - * VLAN's TCI */ + vlan_qos = READ_ONCE(mp->vlan_qos); + break; } - mp = mp->next; + mp = rcu_dereference(mp->next); } - return 0; + rcu_read_unlock(); + + /* This should already be shifted to mask correctly with + * the VLAN's TCI. + */ + return vlan_qos; } extern bool vlan_do_receive(struct sk_buff **skb); diff --git a/include/linux/iio/adc/qcom-adc5-gen3-common.h b/include/linux/iio/adc/qcom-adc5-gen3-common.h new file mode 100644 index 000000000000..6303eaa6640b --- /dev/null +++ b/include/linux/iio/adc/qcom-adc5-gen3-common.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + * + * Code used in the main and auxiliary Qualcomm PMIC voltage ADCs + * of type ADC5 Gen3. 
+ */ + +#ifndef QCOM_ADC5_GEN3_COMMON_H +#define QCOM_ADC5_GEN3_COMMON_H + +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/iio/adc/qcom-vadc-common.h> +#include <linux/regmap.h> +#include <linux/types.h> + +#define ADC5_GEN3_HS 0x45 +#define ADC5_GEN3_HS_BUSY BIT(7) +#define ADC5_GEN3_HS_READY BIT(0) + +#define ADC5_GEN3_STATUS1 0x46 +#define ADC5_GEN3_STATUS1_CONV_FAULT BIT(7) +#define ADC5_GEN3_STATUS1_THR_CROSS BIT(6) +#define ADC5_GEN3_STATUS1_EOC BIT(0) + +#define ADC5_GEN3_TM_EN_STS 0x47 +#define ADC5_GEN3_TM_HIGH_STS 0x48 +#define ADC5_GEN3_TM_LOW_STS 0x49 + +#define ADC5_GEN3_EOC_STS 0x4a +#define ADC5_GEN3_EOC_CHAN_0 BIT(0) + +#define ADC5_GEN3_EOC_CLR 0x4b +#define ADC5_GEN3_TM_HIGH_STS_CLR 0x4c +#define ADC5_GEN3_TM_LOW_STS_CLR 0x4d +#define ADC5_GEN3_CONV_ERR_CLR 0x4e +#define ADC5_GEN3_CONV_ERR_CLR_REQ BIT(0) + +#define ADC5_GEN3_SID 0x4f +#define ADC5_GEN3_SID_MASK GENMASK(3, 0) + +#define ADC5_GEN3_PERPH_CH 0x50 +#define ADC5_GEN3_CHAN_CONV_REQ BIT(7) + +#define ADC5_GEN3_TIMER_SEL 0x51 +#define ADC5_GEN3_TIME_IMMEDIATE 0x1 + +#define ADC5_GEN3_DIG_PARAM 0x52 +#define ADC5_GEN3_DIG_PARAM_CAL_SEL_MASK GENMASK(5, 4) +#define ADC5_GEN3_DIG_PARAM_DEC_RATIO_SEL_MASK GENMASK(3, 2) + +#define ADC5_GEN3_FAST_AVG 0x53 +#define ADC5_GEN3_FAST_AVG_CTL_EN BIT(7) +#define ADC5_GEN3_FAST_AVG_CTL_SAMPLES_MASK GENMASK(2, 0) + +#define ADC5_GEN3_ADC_CH_SEL_CTL 0x54 +#define ADC5_GEN3_DELAY_CTL 0x55 +#define ADC5_GEN3_HW_SETTLE_DELAY_MASK GENMASK(3, 0) + +#define ADC5_GEN3_CH_EN 0x56 +#define ADC5_GEN3_HIGH_THR_INT_EN BIT(1) +#define ADC5_GEN3_LOW_THR_INT_EN BIT(0) + +#define ADC5_GEN3_LOW_THR0 0x57 +#define ADC5_GEN3_LOW_THR1 0x58 +#define ADC5_GEN3_HIGH_THR0 0x59 +#define ADC5_GEN3_HIGH_THR1 0x5a + +#define ADC5_GEN3_CH_DATA0(channel) (0x5c + (channel) * 2) +#define ADC5_GEN3_CH_DATA1(channel) (0x5d + (channel) * 2) + +#define ADC5_GEN3_CONV_REQ 0xe5 +#define ADC5_GEN3_CONV_REQ_REQ 
BIT(0) + +#define ADC5_GEN3_VIRTUAL_SID_MASK GENMASK(15, 8) +#define ADC5_GEN3_CHANNEL_MASK GENMASK(7, 0) +#define ADC5_GEN3_V_CHAN(x) \ + (FIELD_PREP(ADC5_GEN3_VIRTUAL_SID_MASK, (x).sid) | (x).channel) + +/* ADC channels for PMIC5 Gen3 */ +#define ADC5_GEN3_REF_GND 0x00 +#define ADC5_GEN3_1P25VREF 0x01 +#define ADC5_GEN3_DIE_TEMP 0x03 +#define ADC5_GEN3_USB_SNS_V_16 0x11 +#define ADC5_GEN3_VIN_DIV16_MUX 0x12 +#define ADC5_GEN3_VPH_PWR 0x8e +#define ADC5_GEN3_VBAT_SNS_QBG 0x8f +/* 100k pull-up channels */ +#define ADC5_GEN3_AMUX1_THM_100K_PU 0x44 +#define ADC5_GEN3_AMUX2_THM_100K_PU 0x45 +#define ADC5_GEN3_AMUX3_THM_100K_PU 0x46 +#define ADC5_GEN3_AMUX4_THM_100K_PU 0x47 +#define ADC5_GEN3_AMUX5_THM_100K_PU 0x48 +#define ADC5_GEN3_AMUX6_THM_100K_PU 0x49 +#define ADC5_GEN3_AMUX1_GPIO_100K_PU 0x4a +#define ADC5_GEN3_AMUX2_GPIO_100K_PU 0x4b +#define ADC5_GEN3_AMUX3_GPIO_100K_PU 0x4c +#define ADC5_GEN3_AMUX4_GPIO_100K_PU 0x4d + +#define ADC5_MAX_CHANNEL 0xc0 + +enum adc5_cal_method { + ADC5_NO_CAL = 0, + ADC5_RATIOMETRIC_CAL, + ADC5_ABSOLUTE_CAL, +}; + +enum adc5_time_select { + MEAS_INT_DISABLE = 0, + MEAS_INT_IMMEDIATE, + MEAS_INT_50MS, + MEAS_INT_100MS, + MEAS_INT_1S, + MEAS_INT_NONE, +}; + +/** + * struct adc5_sdam_data - data per SDAM allocated for adc usage + * @base_addr: base address for the ADC SDAM peripheral. + * @irq_name: ADC IRQ name. + * @irq: ADC IRQ number. + */ +struct adc5_sdam_data { + u16 base_addr; + const char *irq_name; + int irq; +}; + +/** + * struct adc5_device_data - Top-level ADC device data + * @regmap: ADC peripheral register map field. + * @base: array of SDAM data. + * @num_sdams: number of ADC SDAM peripherals. + */ +struct adc5_device_data { + struct regmap *regmap; + struct adc5_sdam_data *base; + int num_sdams; +}; + +/** + * struct adc5_channel_common_prop - ADC channel properties (common to ADC and TM). + * @channel: channel number, refer to the channel list. + * @cal_method: calibration method. 
+ * @decimation: sampling rate supported for the channel. + * @sid: ID of PMIC owning the channel. + * @label: Channel name used in device tree. + * @prescale: channel scaling performed on the input signal. + * @hw_settle_time_us: the time between AMUX being configured and the + * start of conversion in uS. + * @avg_samples: ability to provide single result from the ADC + * that is an average of multiple measurements. + * @scale_fn_type: Represents the scaling function to convert voltage + * physical units desired by the client for the channel. + */ +struct adc5_channel_common_prop { + unsigned int channel; + enum adc5_cal_method cal_method; + unsigned int decimation; + unsigned int sid; + const char *label; + unsigned int prescale; + unsigned int hw_settle_time_us; + unsigned int avg_samples; + enum vadc_scale_fn_type scale_fn_type; +}; + +/** + * struct tm5_aux_dev_wrapper - wrapper structure around TM auxiliary device + * @aux_dev: TM auxiliary device structure. + * @dev_data: Top-level ADC device data. + * @tm_props: Array of common ADC channel properties for TM channels. + * @n_tm_channels: number of TM channels. 
+ */ +struct tm5_aux_dev_wrapper { + struct auxiliary_device aux_dev; + struct adc5_device_data *dev_data; + struct adc5_channel_common_prop *tm_props; + unsigned int n_tm_channels; +}; + +int adc5_gen3_read(struct adc5_device_data *adc, unsigned int sdam_index, + u16 offset, u8 *data, int len); + +int adc5_gen3_write(struct adc5_device_data *adc, unsigned int sdam_index, + u16 offset, u8 *data, int len); + +int adc5_gen3_poll_wait_hs(struct adc5_device_data *adc, + unsigned int sdam_index); + +void adc5_gen3_update_dig_param(struct adc5_channel_common_prop *prop, + u8 *data); + +int adc5_gen3_status_clear(struct adc5_device_data *adc, + int sdam_index, u16 offset, u8 *val, int len); + +void adc5_gen3_mutex_lock(struct device *dev); +void adc5_gen3_mutex_unlock(struct device *dev); +int adc5_gen3_get_scaled_reading(struct device *dev, + struct adc5_channel_common_prop *common_props, + int *val); +int adc5_gen3_therm_code_to_temp(struct device *dev, + struct adc5_channel_common_prop *common_props, + u16 code, int *val); +void adc5_gen3_register_tm_event_notifier(struct device *dev, + void (*handler)(struct auxiliary_device *)); + +#endif /* QCOM_ADC5_GEN3_COMMON_H */ diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h index 7f815f3fed6a..4d15c2a9802c 100644 --- a/include/linux/iio/backend.h +++ b/include/linux/iio/backend.h @@ -85,6 +85,27 @@ enum iio_backend_filter_type { }; /** + * enum iio_backend_capabilities - Backend capabilities + * Backend capabilities can be used by frontends to check if a given + * functionality is supported by the backend. This is useful for frontend + * devices which are expected to work with alternative backend + * implementations. Capabilities are loosely coupled with operations, + * meaning that a capability requires certain operations to be implemented + * by the backend. A capability might be mapped to a single operation or + * multiple operations. 
+ * + * @IIO_BACKEND_CAP_CALIBRATION: Backend supports digital interface + * calibration. Calibration procedure is device specific. + * @IIO_BACKEND_CAP_BUFFER: Support for IIO buffer interface. + * @IIO_BACKEND_CAP_ENABLE: Backend can be explicitly enabled/disabled. + */ +enum iio_backend_capabilities { + IIO_BACKEND_CAP_CALIBRATION = BIT(0), + IIO_BACKEND_CAP_BUFFER = BIT(1), + IIO_BACKEND_CAP_ENABLE = BIT(2), +}; + +/** * struct iio_backend_ops - operations structure for an iio_backend * @enable: Enable backend. * @disable: Disable backend. @@ -179,10 +200,12 @@ struct iio_backend_ops { * struct iio_backend_info - info structure for an iio_backend * @name: Backend name. * @ops: Backend operations. + * @caps: Backend capabilities. (bitmask of enum iio_backend_capabilities). */ struct iio_backend_info { const char *name; const struct iio_backend_ops *ops; + u32 caps; }; int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan); @@ -235,6 +258,7 @@ int iio_backend_read_raw(struct iio_backend *back, long mask); int iio_backend_extend_chan_spec(struct iio_backend *back, struct iio_chan_spec *chan); +bool iio_backend_has_caps(struct iio_backend *back, u32 caps); void *iio_backend_get_priv(const struct iio_backend *conv); struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name); struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev, diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index f9ae5cdd884f..1ba496f0fea5 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -160,12 +160,12 @@ struct st_sensor_int_drdy { /** * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt - * struct int1 - data-ready configuration register for INT1 pin. - * struct int2 - data-ready configuration register for INT2 pin. + * @int1: data-ready configuration register for INT1 pin. + * @int2: data-ready configuration register for INT2 pin. 
* @addr_ihl: address to enable/disable active low on the INT lines. * @mask_ihl: mask to enable/disable active low on the INT lines. - * struct stat_drdy - status register of DRDY (data ready) interrupt. - * struct ig1 - represents the Interrupt Generator 1 of sensors. + * @stat_drdy: status register of DRDY (data ready) interrupt. + * @ig1: represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. * @en_mask: mask to write the on/off value for enable. */ @@ -190,6 +190,7 @@ struct st_sensor_data_ready_irq { * @wai_addr: The address of WhoAmI register. * @sensors_supported: List of supported sensors by struct itself. * @ch: IIO channels for the sensor. + * @num_ch: Number of IIO channels in @ch * @odr: Output data rate register and ODR list available. * @pw: Power register of the sensor. * @enable_axis: Enable one or more axis of the sensor. @@ -228,7 +229,7 @@ struct st_sensor_settings { * @regmap: Pointer to specific sensor regmap configuration. * @enabled: Status of the sensor (false->off, true->on). * @odr: Output data rate of the sensor [Hz]. - * num_data_channels: Number of data channels used in buffer. + * @num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * @int_pin_open_drain: Set the interrupt/DRDY to open drain. * @irq: the IRQ number. 
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 34eebad12d2c..4e3099defc1d 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -21,6 +21,7 @@ enum iio_event_info { IIO_EV_INFO_TAP2_MIN_DELAY, IIO_EV_INFO_RUNNING_PERIOD, IIO_EV_INFO_RUNNING_COUNT, + IIO_EV_INFO_SCALE, }; #define IIO_VAL_INT 1 diff --git a/include/linux/ima.h b/include/linux/ima.h index abf8923f8fc5..8e08baf16c2f 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -11,6 +11,7 @@ #include <linux/fs.h> #include <linux/security.h> #include <linux/kexec.h> +#include <linux/secure_boot.h> #include <crypto/hash_info.h> struct linux_binprm; @@ -73,14 +74,8 @@ int ima_validate_range(phys_addr_t phys, size_t size); #endif #ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT -extern bool arch_ima_get_secureboot(void); extern const char * const *arch_get_ima_policy(void); #else -static inline bool arch_ima_get_secureboot(void) -{ - return false; -} - static inline const char * const *arch_get_ima_policy(void) { return NULL; diff --git a/include/linux/input.h b/include/linux/input.h index 7d7cb0593a63..06ca62328db1 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -517,6 +517,10 @@ INPUT_GENERATE_ABS_ACCESSORS(res, resolution) int input_scancode_to_scalar(const struct input_keymap_entry *ke, unsigned int *scancode); +int input_default_setkeycode(struct input_dev *dev, + const struct input_keymap_entry *ke, + unsigned int *old_keycode); + int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); int input_set_keycode(struct input_dev *dev, const struct input_keymap_entry *ke); diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h index 1a0f357c2427..1fe5665a9d02 100644 --- a/include/linux/intel_vsec.h +++ b/include/linux/intel_vsec.h @@ -29,9 +29,15 @@ #define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) #define TABLE_OFFSET_SHIFT 3 +struct device; struct pci_dev; struct resource; +enum intel_vsec_disc_source 
{ + INTEL_VSEC_DISC_PCI, /* PCI, default */ + INTEL_VSEC_DISC_ACPI, /* ACPI */ +}; + enum intel_vsec_id { VSEC_ID_TELEMETRY = 2, VSEC_ID_WATCHER = 3, @@ -82,14 +88,14 @@ enum intel_vsec_quirks { * struct pmt_callbacks - Callback infrastructure for PMT devices * @read_telem: when specified, called by client driver to access PMT * data (instead of direct copy). - * * pdev: PCI device reference for the callback's use + * * dev: device reference for the callback's use * * guid: ID of data to acccss * * data: buffer for the data to be copied * * off: offset into the requested buffer * * count: size of buffer */ struct pmt_callbacks { - int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count); + int (*read_telem)(struct device *dev, u32 guid, u64 *data, loff_t off, u32 count); }; struct vsec_feature_dependency { @@ -102,6 +108,10 @@ struct vsec_feature_dependency { * @parent: parent device in the auxbus chain * @headers: list of headers to define the PMT client devices to create * @deps: array of feature dependencies + * @acpi_disc: ACPI discovery tables, each entry is two QWORDs + * in little-endian format as defined by the PMT ACPI spec. + * Valid only when @src == INTEL_VSEC_DISC_ACPI.
+ * @src: source of discovery table data * @priv_data: private data, usable by parent devices, currently a callback * @caps: bitmask of PMT capabilities for the given headers * @quirks: bitmask of VSEC device quirks @@ -112,6 +122,8 @@ struct intel_vsec_platform_info { struct device *parent; struct intel_vsec_header **headers; const struct vsec_feature_dependency *deps; + u32 (*acpi_disc)[4]; + enum intel_vsec_disc_source src; void *priv_data; unsigned long caps; unsigned long quirks; @@ -122,8 +134,13 @@ struct intel_vsec_platform_info { /** * struct intel_vsec_device - Auxbus specific device information * @auxdev: auxbus device struct for auxbus access - * @pcidev: pci device associated with the device - * @resource: any resources shared by the parent + * @dev: struct device associated with the device + * @resource: PCI discovery resources (BAR windows), one per discovery + * instance. Valid only when @src == INTEL_VSEC_DISC_PCI + * @acpi_disc: ACPI discovery tables, each entry is two QWORDs + * in little-endian format as defined by the PMT ACPI spec. + * Valid only when @src == INTEL_VSEC_DISC_ACPI. 
+ * @src: source of discovery table data * @ida: id reference * @num_resources: number of resources * @id: xarray id @@ -135,8 +152,10 @@ struct intel_vsec_platform_info { */ struct intel_vsec_device { struct auxiliary_device auxdev; - struct pci_dev *pcidev; + struct device *dev; struct resource *resource; + u32 (*acpi_disc)[4]; + enum intel_vsec_disc_source src; struct ida *ida; int num_resources; int id; /* xa */ @@ -184,7 +203,7 @@ struct pmt_feature_group { struct telemetry_region regions[]; }; -int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, +int intel_vsec_add_aux(struct device *parent, struct intel_vsec_device *intel_vsec_dev, const char *name); @@ -199,14 +218,14 @@ static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device } #if IS_ENABLED(CONFIG_INTEL_VSEC) -int intel_vsec_register(struct pci_dev *pdev, - struct intel_vsec_platform_info *info); +int intel_vsec_register(struct device *dev, + const struct intel_vsec_platform_info *info); int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info, struct intel_vsec_device *vsec_dev); struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev); #else -static inline int intel_vsec_register(struct pci_dev *pdev, - struct intel_vsec_platform_info *info) +static inline int intel_vsec_register(struct device *dev, + const struct intel_vsec_platform_info *info) { return -ENODEV; } diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h deleted file mode 100644 index 12be09b6883b..000000000000 --- a/include/linux/isdn/capilli.h +++ /dev/null @@ -1,95 +0,0 @@ -/* $Id: capilli.h,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $ - * - * Kernel CAPI 2.0 Driver Interface for Linux - * - * Copyright 1999 by Carsten Paeth <calle@calle.de> - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. 
- * - */ - -#ifndef __CAPILLI_H__ -#define __CAPILLI_H__ - -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/capi.h> -#include <linux/kernelcapi.h> - -typedef struct capiloaddatapart { - int user; /* data in userspace ? */ - int len; - unsigned char *data; -} capiloaddatapart; - -typedef struct capiloaddata { - capiloaddatapart firmware; - capiloaddatapart configuration; -} capiloaddata; - -typedef struct capicardparams { - unsigned int port; - unsigned irq; - int cardtype; - int cardnr; - unsigned int membase; -} capicardparams; - -struct capi_ctr { - /* filled in before calling attach_capi_ctr */ - struct module *owner; - void *driverdata; /* driver specific */ - char name[32]; /* name of controller */ - char *driver_name; /* name of driver */ - int (*load_firmware)(struct capi_ctr *, capiloaddata *); - void (*reset_ctr)(struct capi_ctr *); - void (*register_appl)(struct capi_ctr *, u16 appl, - capi_register_params *); - void (*release_appl)(struct capi_ctr *, u16 appl); - u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb); - - char *(*procinfo)(struct capi_ctr *); - int (*proc_show)(struct seq_file *, void *); - - /* filled in before calling ready callback */ - u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */ - capi_version version; /* CAPI_GET_VERSION */ - capi_profile profile; /* CAPI_GET_PROFILE */ - u8 serial[CAPI_SERIAL_LEN]; /* CAPI_GET_SERIAL */ - - /* management information for kcapi */ - - unsigned long nrecvctlpkt; - unsigned long nrecvdatapkt; - unsigned long nsentctlpkt; - unsigned long nsentdatapkt; - - int cnr; /* controller number */ - unsigned short state; /* controller state */ - int blocked; /* output blocked */ - int traceflag; /* capi trace */ - - struct proc_dir_entry *procent; - char procfn[128]; -}; - -int attach_capi_ctr(struct capi_ctr *); -int detach_capi_ctr(struct capi_ctr *); - -void capi_ctr_ready(struct capi_ctr * card); -void capi_ctr_down(struct capi_ctr * card); -void 
capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb); - -// --------------------------------------------------------------------------- -// needed for AVM capi drivers - -struct capi_driver { - char name[32]; /* driver name */ - char revision[32]; - - /* management information for kcapi */ - struct list_head list; -}; - -#endif /* __CAPILLI_H__ */ diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h deleted file mode 100644 index 953fd500dff7..000000000000 --- a/include/linux/isdn/capiutil.h +++ /dev/null @@ -1,60 +0,0 @@ -/* $Id: capiutil.h,v 1.5.6.2 2001/09/23 22:24:33 kai Exp $ - * - * CAPI 2.0 defines & types - * - * From CAPI 2.0 Development Kit AVM 1995 (msg.c) - * Rewritten for Linux 1996 by Carsten Paeth <calle@calle.de> - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ - -#ifndef __CAPIUTIL_H__ -#define __CAPIUTIL_H__ - -#include <asm/types.h> - -#define CAPIMSG_BASELEN 8 -#define CAPIMSG_U8(m, off) (m[off]) -#define CAPIMSG_U16(m, off) (m[off]|(m[(off)+1]<<8)) -#define CAPIMSG_U32(m, off) (m[off]|(m[(off)+1]<<8)|(m[(off)+2]<<16)|(m[(off)+3]<<24)) -#define CAPIMSG_LEN(m) CAPIMSG_U16(m,0) -#define CAPIMSG_APPID(m) CAPIMSG_U16(m,2) -#define CAPIMSG_COMMAND(m) CAPIMSG_U8(m,4) -#define CAPIMSG_SUBCOMMAND(m) CAPIMSG_U8(m,5) -#define CAPIMSG_CMD(m) (((m[4])<<8)|(m[5])) -#define CAPIMSG_MSGID(m) CAPIMSG_U16(m,6) -#define CAPIMSG_CONTROLLER(m) (m[8] & 0x7f) -#define CAPIMSG_CONTROL(m) CAPIMSG_U32(m, 8) -#define CAPIMSG_NCCI(m) CAPIMSG_CONTROL(m) -#define CAPIMSG_DATALEN(m) CAPIMSG_U16(m,16) /* DATA_B3_REQ */ - -static inline void capimsg_setu8(void *m, int off, __u8 val) -{ - ((__u8 *)m)[off] = val; -} - -static inline void capimsg_setu16(void *m, int off, __u16 val) -{ - ((__u8 *)m)[off] = val & 0xff; - ((__u8 *)m)[off+1] = (val >> 8) & 0xff; -} - -static inline void capimsg_setu32(void *m, int off, __u32 val) -{ 
- ((__u8 *)m)[off] = val & 0xff; - ((__u8 *)m)[off+1] = (val >> 8) & 0xff; - ((__u8 *)m)[off+2] = (val >> 16) & 0xff; - ((__u8 *)m)[off+3] = (val >> 24) & 0xff; -} - -#define CAPIMSG_SETLEN(m, len) capimsg_setu16(m, 0, len) -#define CAPIMSG_SETAPPID(m, applid) capimsg_setu16(m, 2, applid) -#define CAPIMSG_SETCOMMAND(m,cmd) capimsg_setu8(m, 4, cmd) -#define CAPIMSG_SETSUBCOMMAND(m, cmd) capimsg_setu8(m, 5, cmd) -#define CAPIMSG_SETMSGID(m, msgid) capimsg_setu16(m, 6, msgid) -#define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr) -#define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len) - -#endif /* __CAPIUTIL_H__ */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index a53a00d36228..7e785aa6d35d 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -429,22 +429,46 @@ struct jbd2_inode { unsigned long i_flags; /** - * @i_dirty_start: + * @i_dirty_start_page: + * + * Dirty range start in PAGE_SIZE units. + * + * The dirty range is empty if @i_dirty_start_page is greater than or + * equal to @i_dirty_end_page. * - * Offset in bytes where the dirty range for this inode starts. * [j_list_lock] */ - loff_t i_dirty_start; + pgoff_t i_dirty_start_page; /** - * @i_dirty_end: + * @i_dirty_end_page: + * + * Dirty range end in PAGE_SIZE units (exclusive). * - * Inclusive offset in bytes where the dirty range for this inode - * ends. [j_list_lock] + * [j_list_lock] */ - loff_t i_dirty_end; + pgoff_t i_dirty_end_page; }; +/* + * Lockless readers treat start_page >= end_page as an empty range. + * Writers publish a new non-empty range by storing i_dirty_end_page before + * i_dirty_start_page. 
+ */ +static inline bool jbd2_jinode_get_dirty_range(const struct jbd2_inode *jinode, + loff_t *start, loff_t *end) +{ + pgoff_t start_page = READ_ONCE(jinode->i_dirty_start_page); + pgoff_t end_page = READ_ONCE(jinode->i_dirty_end_page); + + if (start_page >= end_page) + return false; + + *start = (loff_t)start_page << PAGE_SHIFT; + *end = ((loff_t)end_page << PAGE_SHIFT) - 1; + return true; +} + struct jbd2_revoke_table_s; /** diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h deleted file mode 100644 index 94ba42bf9da1..000000000000 --- a/include/linux/kernelcapi.h +++ /dev/null @@ -1,45 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * $Id: kernelcapi.h,v 1.8.6.2 2001/02/07 11:31:31 kai Exp $ - * - * Kernel CAPI 2.0 Interface for Linux - * - * (c) Copyright 1997 by Carsten Paeth (calle@calle.in-berlin.de) - * - */ -#ifndef __KERNELCAPI_H__ -#define __KERNELCAPI_H__ - -#include <linux/list.h> -#include <linux/skbuff.h> -#include <linux/workqueue.h> -#include <linux/notifier.h> -#include <uapi/linux/kernelcapi.h> - -#define CAPI_NOERROR 0x0000 - -#define CAPI_TOOMANYAPPLS 0x1001 -#define CAPI_LOGBLKSIZETOSMALL 0x1002 -#define CAPI_BUFFEXECEEDS64K 0x1003 -#define CAPI_MSGBUFSIZETOOSMALL 0x1004 -#define CAPI_ANZLOGCONNNOTSUPPORTED 0x1005 -#define CAPI_REGRESERVED 0x1006 -#define CAPI_REGBUSY 0x1007 -#define CAPI_REGOSRESOURCEERR 0x1008 -#define CAPI_REGNOTINSTALLED 0x1009 -#define CAPI_REGCTRLERNOTSUPPORTEXTEQUIP 0x100a -#define CAPI_REGCTRLERONLYSUPPORTEXTEQUIP 0x100b - -#define CAPI_ILLAPPNR 0x1101 -#define CAPI_ILLCMDORSUBCMDORMSGTOSMALL 0x1102 -#define CAPI_SENDQUEUEFULL 0x1103 -#define CAPI_RECEIVEQUEUEEMPTY 0x1104 -#define CAPI_RECEIVEOVERFLOW 0x1105 -#define CAPI_UNKNOWNNOTPAR 0x1106 -#define CAPI_MSGBUSY 0x1107 -#define CAPI_MSGOSRESOURCEERR 0x1108 -#define CAPI_MSGNOTINSTALLED 0x1109 -#define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP 0x110a -#define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b - -#endif /* __KERNELCAPI_H__ */ diff --git 
a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h index ac4129d1d741..8968c56d2d73 100644 --- a/include/linux/kexec_handover.h +++ b/include/linux/kexec_handover.h @@ -32,9 +32,9 @@ void kho_restore_free(void *mem); struct folio *kho_restore_folio(phys_addr_t phys); struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages); void *kho_restore_vmalloc(const struct kho_vmalloc *preservation); -int kho_add_subtree(const char *name, void *fdt); -void kho_remove_subtree(void *fdt); -int kho_retrieve_subtree(const char *name, phys_addr_t *phys); +int kho_add_subtree(const char *name, void *blob, size_t size); +void kho_remove_subtree(void *blob); +int kho_retrieve_subtree(const char *name, phys_addr_t *phys, size_t *size); void kho_memory_init(void); @@ -97,14 +97,15 @@ static inline void *kho_restore_vmalloc(const struct kho_vmalloc *preservation) return NULL; } -static inline int kho_add_subtree(const char *name, void *fdt) +static inline int kho_add_subtree(const char *name, void *blob, size_t size) { return -EOPNOTSUPP; } -static inline void kho_remove_subtree(void *fdt) { } +static inline void kho_remove_subtree(void *blob) { } -static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys) +static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys, + size_t *size) { return -EOPNOTSUPP; } diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 22b3f3839f30..6c46591a2eac 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -202,9 +202,10 @@ extern void kgdb_call_nmi_hook(void *ignored); * * On SMP systems, we need to get the attention of the other CPUs * and get them into a known state. This should do what is needed - * to get the other CPUs to call kgdb_wait(). Note that on some arches, - * the NMI approach is not used for rounding up all the CPUs. Normally - * those architectures can just not implement this and get the default. + * to get the other CPUs to call kgdb_handle_exception(). 
Note that + * on some arches, the NMI approach is not used for rounding up all + * the CPUs. Normally those architectures can just not implement + * this and get the default. * * On non-SMP systems, this is not called. */ diff --git a/include/linux/kho/abi/kexec_handover.h b/include/linux/kho/abi/kexec_handover.h index 6b7d8ef550f9..7e847a2339b0 100644 --- a/include/linux/kho/abi/kexec_handover.h +++ b/include/linux/kho/abi/kexec_handover.h @@ -41,25 +41,28 @@ * restore the preserved data.:: * * / { - * compatible = "kho-v2"; + * compatible = "kho-v3"; * * preserved-memory-map = <0x...>; * * <subnode-name-1> { * preserved-data = <0x...>; + * blob-size = <0x...>; * }; * * <subnode-name-2> { * preserved-data = <0x...>; + * blob-size = <0x...>; * }; * ... ... * <subnode-name-N> { * preserved-data = <0x...>; + * blob-size = <0x...>; * }; * }; * * Root KHO Node (/): - * - compatible: "kho-v2" + * - compatible: "kho-v3" * * Indentifies the overall KHO ABI version. * @@ -78,16 +81,25 @@ * * Physical address pointing to a subnode data blob that is also * being preserved. + * + * - blob-size: u64 + * + * Size in bytes of the preserved data blob. This is needed because + * blobs may use arbitrary formats (not just FDT), so the size + * cannot be determined from the blob content alone. */ /* The compatible string for the KHO FDT root node. */ -#define KHO_FDT_COMPATIBLE "kho-v2" +#define KHO_FDT_COMPATIBLE "kho-v3" /* The FDT property for the preserved memory map. */ #define KHO_FDT_MEMORY_MAP_PROP_NAME "preserved-memory-map" /* The FDT property for preserved data blobs. */ -#define KHO_FDT_SUB_TREE_PROP_NAME "preserved-data" +#define KHO_SUB_TREE_PROP_NAME "preserved-data" + +/* The FDT property for the size of preserved data blobs. 
 */ +#define KHO_SUB_TREE_SIZE_PROP_NAME "blob-size" /** * DOC: Kexec Handover ABI for vmalloc Preservation diff --git a/include/linux/kho/abi/kexec_metadata.h b/include/linux/kho/abi/kexec_metadata.h new file mode 100644 index 000000000000..e9e3f7e38a7c --- /dev/null +++ b/include/linux/kho/abi/kexec_metadata.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/** + * DOC: Kexec Metadata ABI + * + * The "kexec-metadata" subtree stores optional metadata about the kexec chain. + * It is registered via kho_add_subtree(), keeping it independent from the core + * KHO ABI. This allows the metadata format to evolve without affecting other + * KHO consumers. + * + * The metadata is stored as a plain C struct rather than FDT format for + * simplicity and direct field access. + * + * Copyright (c) 2026 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2026 Breno Leitao <leitao@debian.org> + */ + +#ifndef _LINUX_KHO_ABI_KEXEC_METADATA_H +#define _LINUX_KHO_ABI_KEXEC_METADATA_H + +#include <linux/types.h> +#include <linux/utsname.h> + +#define KHO_KEXEC_METADATA_VERSION 1 + +/** + * struct kho_kexec_metadata - Kexec metadata passed between kernels + * @version: ABI version of this struct (must be first field) + * @previous_release: Kernel version string that initiated the kexec + * @kexec_count: Number of kexec boots since last cold boot + * + * This structure is preserved across kexec and allows the new kernel to + * identify which kernel it was booted from and how many kexec reboots + * have occurred. + * + * __NEW_UTS_LEN is part of uABI, so it is safe to use it here. 
+ */ +struct kho_kexec_metadata { + u32 version; + char previous_release[__NEW_UTS_LEN + 1]; + u32 kexec_count; +} __packed; + +#define KHO_METADATA_NODE_NAME "kexec-metadata" + +#endif /* _LINUX_KHO_ABI_KEXEC_METADATA_H */ diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h index 2e4f4c3539c0..0f3037f1a94f 100644 --- a/include/linux/linear_range.h +++ b/include/linux/linear_range.h @@ -57,5 +57,8 @@ void linear_range_get_selector_within(const struct linear_range *r, int linear_range_get_selector_low_array(const struct linear_range *r, int ranges, unsigned int val, unsigned int *selector, bool *found); +int linear_range_get_selector_high_array(const struct linear_range *r, + int ranges, unsigned int val, + unsigned int *selector, bool *found); #endif diff --git a/include/linux/liveupdate.h b/include/linux/liveupdate.h index dd11fdc76a5f..30c5a39ff9e9 100644 --- a/include/linux/liveupdate.h +++ b/include/linux/liveupdate.h @@ -12,6 +12,7 @@ #include <linux/kho/abi/luo.h> #include <linux/list.h> #include <linux/mutex.h> +#include <linux/rwsem.h> #include <linux/types.h> #include <uapi/linux/liveupdate.h> @@ -63,6 +64,7 @@ struct liveupdate_file_op_args { * finish, in order to do successful finish calls for all * resources in the session. * @finish: Required. Final cleanup in the new kernel. + * @get_id: Optional. Returns a unique identifier for the file. 
* @owner: Module reference * * All operations (except can_preserve) receive a pointer to a @@ -78,6 +80,7 @@ struct liveupdate_file_ops { int (*retrieve)(struct liveupdate_file_op_args *args); bool (*can_finish)(struct liveupdate_file_op_args *args); void (*finish)(struct liveupdate_file_op_args *args); + unsigned long (*get_id)(struct file *file); struct module *owner; }; @@ -228,12 +231,12 @@ bool liveupdate_enabled(void); int liveupdate_reboot(void); int liveupdate_register_file_handler(struct liveupdate_file_handler *fh); -int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh); +void liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh); int liveupdate_register_flb(struct liveupdate_file_handler *fh, struct liveupdate_flb *flb); -int liveupdate_unregister_flb(struct liveupdate_file_handler *fh, - struct liveupdate_flb *flb); +void liveupdate_unregister_flb(struct liveupdate_file_handler *fh, + struct liveupdate_flb *flb); int liveupdate_flb_get_incoming(struct liveupdate_flb *flb, void **objp); int liveupdate_flb_get_outgoing(struct liveupdate_flb *flb, void **objp); @@ -255,9 +258,8 @@ static inline int liveupdate_register_file_handler(struct liveupdate_file_handle return -EOPNOTSUPP; } -static inline int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh) +static inline void liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh) { - return -EOPNOTSUPP; } static inline int liveupdate_register_flb(struct liveupdate_file_handler *fh, @@ -266,10 +268,9 @@ static inline int liveupdate_register_flb(struct liveupdate_file_handler *fh, return -EOPNOTSUPP; } -static inline int liveupdate_unregister_flb(struct liveupdate_file_handler *fh, - struct liveupdate_flb *flb) +static inline void liveupdate_unregister_flb(struct liveupdate_file_handler *fh, + struct liveupdate_flb *flb) { - return -EOPNOTSUPP; } static inline int liveupdate_flb_get_incoming(struct liveupdate_flb *flb, diff --git 
a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index c53c81242e72..b614e0deea72 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -10,27 +10,20 @@ #ifndef LINUX_LOCKD_BIND_H #define LINUX_LOCKD_BIND_H -#include <linux/lockd/nlm.h> -/* need xdr-encoded error codes too, so... */ -#include <linux/lockd/xdr.h> -#ifdef CONFIG_LOCKD_V4 -#include <linux/lockd/xdr4.h> -#endif - -/* Dummy declarations */ +struct file_lock; +struct nfs_fh; struct svc_rqst; struct rpc_task; struct rpc_clnt; +struct super_block; /* * This is the set of functions for lockd->nfsd communication */ struct nlmsvc_binding { - __be32 (*fopen)(struct svc_rqst *, - struct nfs_fh *, - struct file **, - int mode); - void (*fclose)(struct file *); + int (*fopen)(struct svc_rqst *rqstp, struct nfs_fh *f, + struct file **filp, int flags); + void (*fclose)(struct file *filp); }; extern const struct nlmsvc_binding *nlmsvc_ops; @@ -58,6 +51,7 @@ struct nlmclnt_initdata { extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init); extern void nlmclnt_done(struct nlm_host *host); extern struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host); +extern void nlmclnt_shutdown_rpc_clnt(struct nlm_host *host); /* * NLM client operations provide a means to modify RPC processing of NLM @@ -82,4 +76,10 @@ extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, vo extern int lockd_up(struct net *net, const struct cred *cred); extern void lockd_down(struct net *net); +/* + * Cluster failover support + */ +int nlmsvc_unlock_all_by_sb(struct super_block *sb); +int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); + #endif /* LINUX_LOCKD_BIND_H */ diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h deleted file mode 100644 index eede2ab5246f..000000000000 --- a/include/linux/lockd/debug.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/linux/lockd/debug.h - * - * Debugging stuff. 
- * - * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> - */ - -#ifndef LINUX_LOCKD_DEBUG_H -#define LINUX_LOCKD_DEBUG_H - -#include <linux/sunrpc/debug.h> - -/* - * Enable lockd debugging. - * Requires RPC_DEBUG. - */ -#undef ifdebug -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) -# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) -#else -# define ifdebug(flag) if (0) -#endif - -/* - * Debug flags - */ -#define NLMDBG_SVC 0x0001 -#define NLMDBG_CLIENT 0x0002 -#define NLMDBG_CLNTLOCK 0x0004 -#define NLMDBG_SVCLOCK 0x0008 -#define NLMDBG_MONITOR 0x0010 -#define NLMDBG_CLNTSUBS 0x0020 -#define NLMDBG_SVCSUBS 0x0040 -#define NLMDBG_HOSTCACHE 0x0080 -#define NLMDBG_XDR 0x0100 -#define NLMDBG_ALL 0x7fff - -#endif /* LINUX_LOCKD_DEBUG_H */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h deleted file mode 100644 index 330e38776bb2..000000000000 --- a/include/linux/lockd/lockd.h +++ /dev/null @@ -1,395 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/linux/lockd/lockd.h - * - * General-purpose lockd include file. - * - * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> - */ - -#ifndef LINUX_LOCKD_LOCKD_H -#define LINUX_LOCKD_LOCKD_H - -/* XXX: a lot of this should really be under fs/lockd. */ - -#include <linux/exportfs.h> -#include <linux/in.h> -#include <linux/in6.h> -#include <net/ipv6.h> -#include <linux/fs.h> -#include <linux/kref.h> -#include <linux/refcount.h> -#include <linux/utsname.h> -#include <linux/lockd/bind.h> -#include <linux/lockd/xdr.h> -#ifdef CONFIG_LOCKD_V4 -#include <linux/lockd/xdr4.h> -#endif -#include <linux/lockd/debug.h> -#include <linux/sunrpc/svc.h> - -/* - * Version string - */ -#define LOCKD_VERSION "0.5" - -/* - * Default timeout for RPC calls (seconds) - */ -#define LOCKD_DFLT_TIMEO 10 - -/* - * Lockd host handle (used both by the client and server personality). 
- */ -struct nlm_host { - struct hlist_node h_hash; /* doubly linked list */ - struct sockaddr_storage h_addr; /* peer address */ - size_t h_addrlen; - struct sockaddr_storage h_srcaddr; /* our address (optional) */ - size_t h_srcaddrlen; - struct rpc_clnt *h_rpcclnt; /* RPC client to talk to peer */ - char *h_name; /* remote hostname */ - u32 h_version; /* interface version */ - unsigned short h_proto; /* transport proto */ - unsigned short h_reclaiming : 1, - h_server : 1, /* server side, not client side */ - h_noresvport : 1, - h_inuse : 1; - wait_queue_head_t h_gracewait; /* wait while reclaiming */ - struct rw_semaphore h_rwsem; /* Reboot recovery lock */ - u32 h_state; /* pseudo-state counter */ - u32 h_nsmstate; /* true remote NSM state */ - u32 h_pidcount; /* Pseudopids */ - refcount_t h_count; /* reference count */ - struct mutex h_mutex; /* mutex for pmap binding */ - unsigned long h_nextrebind; /* next portmap call */ - unsigned long h_expires; /* eligible for GC */ - struct list_head h_lockowners; /* Lockowners for the client */ - spinlock_t h_lock; - struct list_head h_granted; /* Locks in GRANTED state */ - struct list_head h_reclaim; /* Locks in RECLAIM state */ - struct nsm_handle *h_nsmhandle; /* NSM status handle */ - char *h_addrbuf; /* address eyecatcher */ - struct net *net; /* host net */ - const struct cred *h_cred; - char nodename[UNX_MAXNODENAME + 1]; - const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */ -}; - -/* - * The largest string sm_addrbuf should hold is a full-size IPv6 address - * (no "::" anywhere) with a scope ID. The buffer size is computed to - * hold eight groups of colon-separated four-hex-digit numbers, a - * percent sign, a scope id (at most 32 bits, in decimal), and NUL. 
- */ -#define NSM_ADDRBUF ((8 * 4 + 7) + (1 + 10) + 1) - -struct nsm_handle { - struct list_head sm_link; - refcount_t sm_count; - char *sm_mon_name; - char *sm_name; - struct sockaddr_storage sm_addr; - size_t sm_addrlen; - unsigned int sm_monitored : 1, - sm_sticky : 1; /* don't unmonitor */ - struct nsm_private sm_priv; - char sm_addrbuf[NSM_ADDRBUF]; -}; - -/* - * Rigorous type checking on sockaddr type conversions - */ -static inline struct sockaddr *nlm_addr(const struct nlm_host *host) -{ - return (struct sockaddr *)&host->h_addr; -} - -static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host) -{ - return (struct sockaddr *)&host->h_srcaddr; -} - -/* - * Map an fl_owner_t into a unique 32-bit "pid" - */ -struct nlm_lockowner { - struct list_head list; - refcount_t count; - - struct nlm_host *host; - fl_owner_t owner; - uint32_t pid; -}; - -/* - * This is the representation of a blocked client lock. - */ -struct nlm_wait { - struct list_head b_list; /* linked list */ - wait_queue_head_t b_wait; /* where to wait on */ - struct nlm_host *b_host; - struct file_lock *b_lock; /* local file lock */ - __be32 b_status; /* grant callback status */ -}; - -/* - * Memory chunk for NLM client RPC request. - */ -#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) -struct nlm_rqst { - refcount_t a_count; - unsigned int a_flags; /* initial RPC task flags */ - struct nlm_host * a_host; /* host handle */ - struct nlm_args a_args; /* arguments */ - struct nlm_res a_res; /* result */ - struct nlm_block * a_block; - unsigned int a_retries; /* Retry count */ - u8 a_owner[NLMCLNT_OHSIZE]; - void * a_callback_data; /* sent to nlmclnt_operations callbacks */ -}; - -/* - * This struct describes a file held open by lockd on behalf of - * an NFS client. 
- */ -struct nlm_file { - struct hlist_node f_list; /* linked list */ - struct nfs_fh f_handle; /* NFS file handle */ - struct file * f_file[2]; /* VFS file pointers, - indexed by O_ flags */ - struct nlm_share * f_shares; /* DOS shares */ - struct list_head f_blocks; /* blocked locks */ - unsigned int f_locks; /* guesstimate # of locks */ - unsigned int f_count; /* reference count */ - struct mutex f_mutex; /* avoid concurrent access */ -}; - -/* - * This is a server block (i.e. a lock requested by some client which - * couldn't be granted because of a conflicting lock). - */ -#define NLM_NEVER (~(unsigned long) 0) -/* timeout on non-blocking call: */ -#define NLM_TIMEOUT (7 * HZ) - -struct nlm_block { - struct kref b_count; /* Reference count */ - struct list_head b_list; /* linked list of all blocks */ - struct list_head b_flist; /* linked list (per file) */ - struct nlm_rqst * b_call; /* RPC args & callback info */ - struct svc_serv * b_daemon; /* NLM service */ - struct nlm_host * b_host; /* host handle for RPC clnt */ - unsigned long b_when; /* next re-xmit */ - unsigned int b_id; /* block id */ - unsigned char b_granted; /* VFS granted lock */ - struct nlm_file * b_file; /* file in question */ - struct cache_req * b_cache_req; /* deferred request handling */ - struct cache_deferred_req * b_deferred_req; - unsigned int b_flags; /* block flags */ -#define B_QUEUED 1 /* lock queued */ -#define B_GOT_CALLBACK 2 /* got lock or conflicting lock */ -#define B_TIMED_OUT 4 /* filesystem too slow to respond */ -}; - -/* - * Global variables - */ -extern const struct rpc_program nlm_program; -extern const struct svc_procedure nlmsvc_procedures[24]; -#ifdef CONFIG_LOCKD_V4 -extern const struct svc_procedure nlmsvc_procedures4[24]; -#endif -extern int nlmsvc_grace_period; -extern unsigned long nlm_timeout; -extern bool nsm_use_hostnames; -extern u32 nsm_local_state; - -extern struct timer_list nlmsvc_retry; - -/* - * Lockd client functions - */ -struct nlm_rqst * 
nlm_alloc_call(struct nlm_host *host); -int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *); -int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *); -void nlmclnt_release_call(struct nlm_rqst *); -void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host, - struct file_lock *fl); -void nlmclnt_queue_block(struct nlm_wait *block); -__be32 nlmclnt_dequeue_block(struct nlm_wait *block); -int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout); -__be32 nlmclnt_grant(const struct sockaddr *addr, - const struct nlm_lock *lock); -void nlmclnt_recovery(struct nlm_host *); -int nlmclnt_reclaim(struct nlm_host *, struct file_lock *, - struct nlm_rqst *); -void nlmclnt_next_cookie(struct nlm_cookie *); - -/* - * Host cache - */ -struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, - const size_t salen, - const unsigned short protocol, - const u32 version, - const char *hostname, - int noresvport, - struct net *net, - const struct cred *cred); -void nlmclnt_release_host(struct nlm_host *); -struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, - const char *hostname, - const size_t hostname_len); -void nlmsvc_release_host(struct nlm_host *); -struct rpc_clnt * nlm_bind_host(struct nlm_host *); -void nlm_rebind_host(struct nlm_host *); -struct nlm_host * nlm_get_host(struct nlm_host *); -void nlm_shutdown_hosts(void); -void nlm_shutdown_hosts_net(struct net *net); -void nlm_host_rebooted(const struct net *net, - const struct nlm_reboot *); - -/* - * Host monitoring - */ -int nsm_monitor(const struct nlm_host *host); -void nsm_unmonitor(const struct nlm_host *host); - -struct nsm_handle *nsm_get_handle(const struct net *net, - const struct sockaddr *sap, - const size_t salen, - const char *hostname, - const size_t hostname_len); -struct nsm_handle *nsm_reboot_lookup(const struct net *net, - const struct nlm_reboot *info); -void nsm_release(struct nsm_handle *nsm); - -/* - * This is 
used in garbage collection and resource reclaim - * A return value != 0 means destroy the lock/block/share - */ -typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); - -/* - * Server-side lock handling - */ -int lock_to_openmode(struct file_lock *); -__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, - struct nlm_host *, struct nlm_lock *, int, - struct nlm_cookie *, int); -__be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *); -__be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, - struct nlm_host *host, struct nlm_lock *lock, - struct nlm_lock *conflock); -__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); -void nlmsvc_retry_blocked(struct svc_rqst *rqstp); -void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, - nlm_host_match_fn_t match); -void nlmsvc_grant_reply(struct nlm_cookie *, __be32); -void nlmsvc_release_call(struct nlm_rqst *); -void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); - -/* - * File handling for the server personality - */ -__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, - struct nlm_lock *); -void nlm_release_file(struct nlm_file *); -void nlmsvc_put_lockowner(struct nlm_lockowner *); -void nlmsvc_release_lockowner(struct nlm_lock *); -void nlmsvc_mark_resources(struct net *); -void nlmsvc_free_host_resources(struct nlm_host *); -void nlmsvc_invalidate_all(void); - -/* - * Cluster failover support - */ -int nlmsvc_unlock_all_by_sb(struct super_block *sb); -int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); - -static inline struct file *nlmsvc_file_file(const struct nlm_file *file) -{ - return file->f_file[O_RDONLY] ? 
- file->f_file[O_RDONLY] : file->f_file[O_WRONLY]; -} - -static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) -{ - return file_inode(nlmsvc_file_file(file)); -} - -static inline bool -nlmsvc_file_cannot_lock(const struct nlm_file *file) -{ - return exportfs_cannot_lock(nlmsvc_file_file(file)->f_path.dentry->d_sb->s_export_op); -} - -static inline int __nlm_privileged_request4(const struct sockaddr *sap) -{ - const struct sockaddr_in *sin = (struct sockaddr_in *)sap; - - if (ntohs(sin->sin_port) > 1023) - return 0; - - return ipv4_is_loopback(sin->sin_addr.s_addr); -} - -#if IS_ENABLED(CONFIG_IPV6) -static inline int __nlm_privileged_request6(const struct sockaddr *sap) -{ - const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; - - if (ntohs(sin6->sin6_port) > 1023) - return 0; - - if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) - return ipv4_is_loopback(sin6->sin6_addr.s6_addr32[3]); - - return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; -} -#else /* IS_ENABLED(CONFIG_IPV6) */ -static inline int __nlm_privileged_request6(const struct sockaddr *sap) -{ - return 0; -} -#endif /* IS_ENABLED(CONFIG_IPV6) */ - -/* - * Ensure incoming requests are from local privileged callers. - * - * Return TRUE if sender is local and is connecting via a privileged port; - * otherwise return FALSE. - */ -static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) -{ - const struct sockaddr *sap = svc_addr(rqstp); - - switch (sap->sa_family) { - case AF_INET: - return __nlm_privileged_request4(sap); - case AF_INET6: - return __nlm_privileged_request6(sap); - default: - return 0; - } -} - -/* - * Compare two NLM locks. - * When the second lock is of type F_UNLCK, this acts like a wildcard. 
- */ -static inline int nlm_compare_locks(const struct file_lock *fl1, - const struct file_lock *fl2) -{ - return file_inode(fl1->c.flc_file) == file_inode(fl2->c.flc_file) - && fl1->c.flc_pid == fl2->c.flc_pid - && fl1->c.flc_owner == fl2->c.flc_owner - && fl1->fl_start == fl2->fl_start - && fl1->fl_end == fl2->fl_end - &&(fl1->c.flc_type == fl2->c.flc_type || fl2->c.flc_type == F_UNLCK); -} - -extern const struct lock_manager_operations nlmsvc_lock_operations; - -#endif /* LINUX_LOCKD_LOCKD_H */ diff --git a/include/linux/lockd/nlm.h b/include/linux/lockd/nlm.h deleted file mode 100644 index 6e343ef760dc..000000000000 --- a/include/linux/lockd/nlm.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/linux/lockd/nlm.h - * - * Declarations for the Network Lock Manager protocol. - * - * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> - */ - -#ifndef LINUX_LOCKD_NLM_H -#define LINUX_LOCKD_NLM_H - - -/* Maximum file offset in file_lock.fl_end */ -# define NLM_OFFSET_MAX ((s32) 0x7fffffff) -# define NLM4_OFFSET_MAX ((s64) ((~(u64)0) >> 1)) - -/* Return states for NLM */ -enum { - NLM_LCK_GRANTED = 0, - NLM_LCK_DENIED = 1, - NLM_LCK_DENIED_NOLOCKS = 2, - NLM_LCK_BLOCKED = 3, - NLM_LCK_DENIED_GRACE_PERIOD = 4, -#ifdef CONFIG_LOCKD_V4 - NLM_DEADLCK = 5, - NLM_ROFS = 6, - NLM_STALE_FH = 7, - NLM_FBIG = 8, - NLM_FAILED = 9, -#endif -}; - -#define NLM_PROGRAM 100021 - -#define NLMPROC_NULL 0 -#define NLMPROC_TEST 1 -#define NLMPROC_LOCK 2 -#define NLMPROC_CANCEL 3 -#define NLMPROC_UNLOCK 4 -#define NLMPROC_GRANTED 5 -#define NLMPROC_TEST_MSG 6 -#define NLMPROC_LOCK_MSG 7 -#define NLMPROC_CANCEL_MSG 8 -#define NLMPROC_UNLOCK_MSG 9 -#define NLMPROC_GRANTED_MSG 10 -#define NLMPROC_TEST_RES 11 -#define NLMPROC_LOCK_RES 12 -#define NLMPROC_CANCEL_RES 13 -#define NLMPROC_UNLOCK_RES 14 -#define NLMPROC_GRANTED_RES 15 -#define NLMPROC_NSM_NOTIFY 16 /* statd callback */ -#define NLMPROC_SHARE 20 -#define NLMPROC_UNSHARE 21 -#define 
NLMPROC_NM_LOCK 22 -#define NLMPROC_FREE_ALL 23 - -#endif /* LINUX_LOCKD_NLM_H */ diff --git a/include/linux/lockd/share.h b/include/linux/lockd/share.h deleted file mode 100644 index 1f18a9faf645..000000000000 --- a/include/linux/lockd/share.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/linux/lockd/share.h - * - * DOS share management for lockd. - * - * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> - */ - -#ifndef LINUX_LOCKD_SHARE_H -#define LINUX_LOCKD_SHARE_H - -/* - * DOS share for a specific file - */ -struct nlm_share { - struct nlm_share * s_next; /* linked list */ - struct nlm_host * s_host; /* client host */ - struct nlm_file * s_file; /* shared file */ - struct xdr_netobj s_owner; /* owner handle */ - u32 s_access; /* access mode */ - u32 s_mode; /* deny mode */ -}; - -__be32 nlmsvc_share_file(struct nlm_host *, struct nlm_file *, - struct nlm_args *); -__be32 nlmsvc_unshare_file(struct nlm_host *, struct nlm_file *, - struct nlm_args *); -void nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *, - nlm_host_match_fn_t); - -#endif /* LINUX_LOCKD_SHARE_H */ diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h deleted file mode 100644 index 17d53165d9f2..000000000000 --- a/include/linux/lockd/xdr.h +++ /dev/null @@ -1,115 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/linux/lockd/xdr.h - * - * XDR types for the NLM protocol - * - * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> - */ - -#ifndef LOCKD_XDR_H -#define LOCKD_XDR_H - -#include <linux/fs.h> -#include <linux/filelock.h> -#include <linux/nfs.h> -#include <linux/sunrpc/xdr.h> - -#define SM_MAXSTRLEN 1024 -#define SM_PRIV_SIZE 16 - -struct nsm_private { - unsigned char data[SM_PRIV_SIZE]; -}; - -struct svc_rqst; - -#define NLM_MAXCOOKIELEN 32 -#define NLM_MAXSTRLEN 1024 - -#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED) -#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED) -#define 
nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS) -#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED) -#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD) - -#define nlm_drop_reply cpu_to_be32(30000) - -/* Lock info passed via NLM */ -struct nlm_lock { - char * caller; - unsigned int len; /* length of "caller" */ - struct nfs_fh fh; - struct xdr_netobj oh; - u32 svid; - u64 lock_start; - u64 lock_len; - struct file_lock fl; -}; - -/* - * NLM cookies. Technically they can be 1K, but Linux only uses 8 bytes. - * FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. Therefore we set it to - * 32 bytes. - */ - -struct nlm_cookie -{ - unsigned char data[NLM_MAXCOOKIELEN]; - unsigned int len; -}; - -/* - * Generic lockd arguments for all but sm_notify - */ -struct nlm_args { - struct nlm_cookie cookie; - struct nlm_lock lock; - u32 block; - u32 reclaim; - u32 state; - u32 monitor; - u32 fsm_access; - u32 fsm_mode; -}; - -/* - * Generic lockd result - */ -struct nlm_res { - struct nlm_cookie cookie; - __be32 status; - struct nlm_lock lock; -}; - -/* - * statd callback when client has rebooted - */ -struct nlm_reboot { - char *mon; - unsigned int len; - u32 state; - struct nsm_private priv; -}; - -/* - * Contents of statd callback when monitored host rebooted - */ -#define NLMSVC_XDRSIZE sizeof(struct nlm_args) - -bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool 
nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr); - -bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr); - -#endif /* LOCKD_XDR_H */ diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h deleted file mode 100644 index 72831e35dca3..000000000000 --- a/include/linux/lockd/xdr4.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/linux/lockd/xdr4.h - * - * XDR types for the NLM protocol - * - * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> - */ - -#ifndef LOCKD_XDR4_H -#define LOCKD_XDR4_H - -#include <linux/fs.h> -#include <linux/nfs.h> -#include <linux/sunrpc/xdr.h> -#include <linux/lockd/xdr.h> - -/* error codes new to NLMv4 */ -#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK) -#define nlm4_rofs cpu_to_be32(NLM_ROFS) -#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH) -#define nlm4_fbig cpu_to_be32(NLM_FBIG) -#define nlm4_failed cpu_to_be32(NLM_FAILED) - -void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len); -bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr); - -bool 
nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); -bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr); - -extern const struct rpc_version nlm_version4; - -#endif /* LOCKD_XDR4_H */ diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h deleted file mode 100644 index 00758f45fddc..000000000000 --- a/include/linux/mISDNdsp.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __mISDNdsp_H__ -#define __mISDNdsp_H__ - -struct mISDN_dsp_element_arg { - char *name; - char *def; - char *desc; -}; - -struct mISDN_dsp_element { - char *name; - void *(*new)(const char *arg); - void (*free)(void *p); - void (*process_tx)(void *p, unsigned char *data, int len); - void (*process_rx)(void *p, unsigned char *data, int len, - unsigned int txlen); - int num_args; - struct mISDN_dsp_element_arg - *args; -}; - -extern int mISDN_dsp_element_register(struct mISDN_dsp_element *elem); -extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem); - -struct dsp_features { - int hfc_id; /* unique id to identify the chip (or -1) */ - int hfc_dtmf; /* set if HFCmulti card supports dtmf */ - int hfc_conf; /* set if HFCmulti card supports conferences */ - int hfc_loops; /* set if card supports tone loops */ - int hfc_echocanhw; /* set if card supports echocancelation*/ - int pcm_id; /* unique id to identify the pcm bus (or -1) */ - int pcm_slots; /* number of slots on the pcm bus */ - int pcm_banks; /* number of IO banks of pcm bus */ - int unclocked; /* data is not clocked (has jitter/loss) */ - int unordered; /* data is unordered (packets have index) */ -}; - -#endif - diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h deleted file mode 100644 index ef4f8eb02eac..000000000000 --- a/include/linux/mISDNhw.h +++ /dev/null @@ -1,192 +0,0 @@ 
-/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * - * Author Karsten Keil <kkeil@novell.com> - * - * Basic declarations for the mISDN HW channels - * - * Copyright 2008 by Karsten Keil <kkeil@novell.com> - */ - -#ifndef MISDNHW_H -#define MISDNHW_H -#include <linux/mISDNif.h> -#include <linux/timer.h> - -/* - * HW DEBUG 0xHHHHGGGG - * H - hardware driver specific bits - * G - for all drivers - */ - -#define DEBUG_HW 0x00000001 -#define DEBUG_HW_OPEN 0x00000002 -#define DEBUG_HW_DCHANNEL 0x00000100 -#define DEBUG_HW_DFIFO 0x00000200 -#define DEBUG_HW_BCHANNEL 0x00001000 -#define DEBUG_HW_BFIFO 0x00002000 - -#define MAX_DFRAME_LEN_L1 300 -#define MAX_MON_FRAME 32 -#define MAX_LOG_SPACE 2048 -#define MISDN_COPY_SIZE 32 - -/* channel->Flags bit field */ -#define FLG_TX_BUSY 0 /* tx_buf in use */ -#define FLG_TX_NEXT 1 /* next_skb in use */ -#define FLG_L1_BUSY 2 /* L1 is permanent busy */ -#define FLG_L2_ACTIVATED 3 /* activated from L2 */ -#define FLG_OPEN 5 /* channel is in use */ -#define FLG_ACTIVE 6 /* channel is activated */ -#define FLG_BUSY_TIMER 7 -/* channel type */ -#define FLG_DCHANNEL 8 /* channel is D-channel */ -#define FLG_BCHANNEL 9 /* channel is B-channel */ -#define FLG_ECHANNEL 10 /* channel is E-channel */ -#define FLG_TRANSPARENT 12 /* channel use transparent data */ -#define FLG_HDLC 13 /* channel use hdlc data */ -#define FLG_L2DATA 14 /* channel use L2 DATA primitivs */ -#define FLG_ORIGIN 15 /* channel is on origin site */ -/* channel specific stuff */ -#define FLG_FILLEMPTY 16 /* fill fifo on first frame (empty) */ -/* arcofi specific */ -#define FLG_ARCOFI_TIMER 17 -#define FLG_ARCOFI_ERROR 18 -/* isar specific */ -#define FLG_INITIALIZED 17 -#define FLG_DLEETX 18 -#define FLG_LASTDLE 19 -#define FLG_FIRST 20 -#define FLG_LASTDATA 21 -#define FLG_NMD_DATA 22 -#define FLG_FTI_RUN 23 -#define FLG_LL_OK 24 -#define FLG_LL_CONN 25 -#define FLG_DTMFSEND 26 -#define FLG_TX_EMPTY 27 -/* stop sending received data upstream */ -#define FLG_RX_OFF 
28 -/* workq events */ -#define FLG_RECVQUEUE 30 -#define FLG_PHCHANGE 31 - -#define schedule_event(s, ev) do { \ - test_and_set_bit(ev, &((s)->Flags)); \ - schedule_work(&((s)->workq)); \ - } while (0) - -struct dchannel { - struct mISDNdevice dev; - u_long Flags; - struct work_struct workq; - void (*phfunc) (struct dchannel *); - u_int state; - void *l1; - void *hw; - int slot; /* multiport card channel slot */ - struct timer_list timer; - /* receive data */ - struct sk_buff *rx_skb; - int maxlen; - /* send data */ - struct sk_buff_head squeue; - struct sk_buff_head rqueue; - struct sk_buff *tx_skb; - int tx_idx; - int debug; - /* statistics */ - int err_crc; - int err_tx; - int err_rx; -}; - -typedef int (dchannel_l1callback)(struct dchannel *, u_int); -extern int create_l1(struct dchannel *, dchannel_l1callback *); - -/* private L1 commands */ -#define INFO0 0x8002 -#define INFO1 0x8102 -#define INFO2 0x8202 -#define INFO3_P8 0x8302 -#define INFO3_P10 0x8402 -#define INFO4_P8 0x8502 -#define INFO4_P10 0x8602 -#define LOSTFRAMING 0x8702 -#define ANYSIGNAL 0x8802 -#define HW_POWERDOWN 0x8902 -#define HW_RESET_REQ 0x8a02 -#define HW_POWERUP_REQ 0x8b02 -#define HW_DEACT_REQ 0x8c02 -#define HW_ACTIVATE_REQ 0x8e02 -#define HW_D_NOBLOCKED 0x8f02 -#define HW_RESET_IND 0x9002 -#define HW_POWERUP_IND 0x9102 -#define HW_DEACT_IND 0x9202 -#define HW_ACTIVATE_IND 0x9302 -#define HW_DEACT_CNF 0x9402 -#define HW_TESTLOOP 0x9502 -#define HW_TESTRX_RAW 0x9602 -#define HW_TESTRX_HDLC 0x9702 -#define HW_TESTRX_OFF 0x9802 -#define HW_TIMER3_IND 0x9902 -#define HW_TIMER3_VALUE 0x9a00 -#define HW_TIMER3_VMASK 0x00FF - -struct layer1; -extern int l1_event(struct layer1 *, u_int); - -#define MISDN_BCH_FILL_SIZE 4 - -struct bchannel { - struct mISDNchannel ch; - int nr; - u_long Flags; - struct work_struct workq; - u_int state; - void *hw; - int slot; /* multiport card channel slot */ - struct timer_list timer; - /* receive data */ - u8 fill[MISDN_BCH_FILL_SIZE]; - struct sk_buff 
*rx_skb; - unsigned short maxlen; - unsigned short init_maxlen; /* initial value */ - unsigned short next_maxlen; /* pending value */ - unsigned short minlen; /* for transparent data */ - unsigned short init_minlen; /* initial value */ - unsigned short next_minlen; /* pending value */ - /* send data */ - struct sk_buff *next_skb; - struct sk_buff *tx_skb; - struct sk_buff_head rqueue; - int rcount; - int tx_idx; - int debug; - /* statistics */ - int err_crc; - int err_tx; - int err_rx; - int dropcnt; -}; - -extern int mISDN_initdchannel(struct dchannel *, int, void *); -extern int mISDN_initbchannel(struct bchannel *, unsigned short, - unsigned short); -extern int mISDN_freedchannel(struct dchannel *); -extern void mISDN_clear_bchannel(struct bchannel *); -extern void mISDN_freebchannel(struct bchannel *); -extern int mISDN_ctrl_bchannel(struct bchannel *, struct mISDN_ctrl_req *); -extern void queue_ch_frame(struct mISDNchannel *, u_int, - int, struct sk_buff *); -extern int dchannel_senddata(struct dchannel *, struct sk_buff *); -extern int bchannel_senddata(struct bchannel *, struct sk_buff *); -extern int bchannel_get_rxbuf(struct bchannel *, int); -extern void recv_Dchannel(struct dchannel *); -extern void recv_Echannel(struct dchannel *, struct dchannel *); -extern void recv_Bchannel(struct bchannel *, unsigned int, bool); -extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); -extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); -extern int get_next_bframe(struct bchannel *); -extern int get_next_dframe(struct dchannel *); - -#endif diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h deleted file mode 100644 index 7aab4a769736..000000000000 --- a/include/linux/mISDNif.h +++ /dev/null @@ -1,603 +0,0 @@ -/* - * - * Author Karsten Keil <kkeil@novell.com> - * - * Copyright 2008 by Karsten Keil <kkeil@novell.com> - * - * This code is free software; you can redistribute it and/or modify - * it under the terms of the GNU 
LESSER GENERAL PUBLIC LICENSE - * version 2.1 as published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU LESSER GENERAL PUBLIC LICENSE for more details. - * - */ - -#ifndef mISDNIF_H -#define mISDNIF_H - -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/socket.h> - -/* - * ABI Version 32 bit - * - * <8 bit> Major version - * - changed if any interface become backwards incompatible - * - * <8 bit> Minor version - * - changed if any interface is extended but backwards compatible - * - * <16 bit> Release number - * - should be incremented on every checkin - */ -#define MISDN_MAJOR_VERSION 1 -#define MISDN_MINOR_VERSION 1 -#define MISDN_RELEASE 29 - -/* primitives for information exchange - * generell format - * <16 bit 0 > - * <8 bit command> - * BIT 8 = 1 LAYER private - * BIT 7 = 1 answer - * BIT 6 = 1 DATA - * <8 bit target layer mask> - * - * Layer = 00 is reserved for general commands - Layer = 01 L2 -> HW - Layer = 02 HW -> L2 - Layer = 04 L3 -> L2 - Layer = 08 L2 -> L3 - * Layer = FF is reserved for broadcast commands - */ - -#define MISDN_CMDMASK 0xff00 -#define MISDN_LAYERMASK 0x00ff - -/* generell commands */ -#define OPEN_CHANNEL 0x0100 -#define CLOSE_CHANNEL 0x0200 -#define CONTROL_CHANNEL 0x0300 -#define CHECK_DATA 0x0400 - -/* layer 2 -> layer 1 */ -#define PH_ACTIVATE_REQ 0x0101 -#define PH_DEACTIVATE_REQ 0x0201 -#define PH_DATA_REQ 0x2001 -#define MPH_ACTIVATE_REQ 0x0501 -#define MPH_DEACTIVATE_REQ 0x0601 -#define MPH_INFORMATION_REQ 0x0701 -#define PH_CONTROL_REQ 0x0801 - -/* layer 1 -> layer 2 */ -#define PH_ACTIVATE_IND 0x0102 -#define PH_ACTIVATE_CNF 0x4102 -#define PH_DEACTIVATE_IND 0x0202 -#define PH_DEACTIVATE_CNF 0x4202 -#define PH_DATA_IND 0x2002 -#define PH_DATA_E_IND 0x3002 -#define MPH_ACTIVATE_IND 0x0502 -#define 
MPH_DEACTIVATE_IND 0x0602 -#define MPH_INFORMATION_IND 0x0702 -#define PH_DATA_CNF 0x6002 -#define PH_CONTROL_IND 0x0802 -#define PH_CONTROL_CNF 0x4802 - -/* layer 3 -> layer 2 */ -#define DL_ESTABLISH_REQ 0x1004 -#define DL_RELEASE_REQ 0x1104 -#define DL_DATA_REQ 0x3004 -#define DL_UNITDATA_REQ 0x3104 -#define DL_INFORMATION_REQ 0x0004 - -/* layer 2 -> layer 3 */ -#define DL_ESTABLISH_IND 0x1008 -#define DL_ESTABLISH_CNF 0x5008 -#define DL_RELEASE_IND 0x1108 -#define DL_RELEASE_CNF 0x5108 -#define DL_DATA_IND 0x3008 -#define DL_UNITDATA_IND 0x3108 -#define DL_INFORMATION_IND 0x0008 - -/* intern layer 2 management */ -#define MDL_ASSIGN_REQ 0x1804 -#define MDL_ASSIGN_IND 0x1904 -#define MDL_REMOVE_REQ 0x1A04 -#define MDL_REMOVE_IND 0x1B04 -#define MDL_STATUS_UP_IND 0x1C04 -#define MDL_STATUS_DOWN_IND 0x1D04 -#define MDL_STATUS_UI_IND 0x1E04 -#define MDL_ERROR_IND 0x1F04 -#define MDL_ERROR_RSP 0x5F04 - -/* intern layer 2 */ -#define DL_TIMER200_IND 0x7004 -#define DL_TIMER203_IND 0x7304 -#define DL_INTERN_MSG 0x7804 - -/* DL_INFORMATION_IND types */ -#define DL_INFO_L2_CONNECT 0x0001 -#define DL_INFO_L2_REMOVED 0x0002 - -/* PH_CONTROL types */ -/* TOUCH TONE IS 0x20XX XX "0"..."9", "A","B","C","D","*","#" */ -#define DTMF_TONE_VAL 0x2000 -#define DTMF_TONE_MASK 0x007F -#define DTMF_TONE_START 0x2100 -#define DTMF_TONE_STOP 0x2200 -#define DTMF_HFC_COEF 0x4000 -#define DSP_CONF_JOIN 0x2403 -#define DSP_CONF_SPLIT 0x2404 -#define DSP_RECEIVE_OFF 0x2405 -#define DSP_RECEIVE_ON 0x2406 -#define DSP_ECHO_ON 0x2407 -#define DSP_ECHO_OFF 0x2408 -#define DSP_MIX_ON 0x2409 -#define DSP_MIX_OFF 0x240a -#define DSP_DELAY 0x240b -#define DSP_JITTER 0x240c -#define DSP_TXDATA_ON 0x240d -#define DSP_TXDATA_OFF 0x240e -#define DSP_TX_DEJITTER 0x240f -#define DSP_TX_DEJ_OFF 0x2410 -#define DSP_TONE_PATT_ON 0x2411 -#define DSP_TONE_PATT_OFF 0x2412 -#define DSP_VOL_CHANGE_TX 0x2413 -#define DSP_VOL_CHANGE_RX 0x2414 -#define DSP_BF_ENABLE_KEY 0x2415 -#define DSP_BF_DISABLE 0x2416 
-#define DSP_BF_ACCEPT 0x2416 -#define DSP_BF_REJECT 0x2417 -#define DSP_PIPELINE_CFG 0x2418 -#define HFC_VOL_CHANGE_TX 0x2601 -#define HFC_VOL_CHANGE_RX 0x2602 -#define HFC_SPL_LOOP_ON 0x2603 -#define HFC_SPL_LOOP_OFF 0x2604 -/* for T30 FAX and analog modem */ -#define HW_MOD_FRM 0x4000 -#define HW_MOD_FRH 0x4001 -#define HW_MOD_FTM 0x4002 -#define HW_MOD_FTH 0x4003 -#define HW_MOD_FTS 0x4004 -#define HW_MOD_CONNECT 0x4010 -#define HW_MOD_OK 0x4011 -#define HW_MOD_NOCARR 0x4012 -#define HW_MOD_FCERROR 0x4013 -#define HW_MOD_READY 0x4014 -#define HW_MOD_LASTDATA 0x4015 - -/* DSP_TONE_PATT_ON parameter */ -#define TONE_OFF 0x0000 -#define TONE_GERMAN_DIALTONE 0x0001 -#define TONE_GERMAN_OLDDIALTONE 0x0002 -#define TONE_AMERICAN_DIALTONE 0x0003 -#define TONE_GERMAN_DIALPBX 0x0004 -#define TONE_GERMAN_OLDDIALPBX 0x0005 -#define TONE_AMERICAN_DIALPBX 0x0006 -#define TONE_GERMAN_RINGING 0x0007 -#define TONE_GERMAN_OLDRINGING 0x0008 -#define TONE_AMERICAN_RINGPBX 0x000b -#define TONE_GERMAN_RINGPBX 0x000c -#define TONE_GERMAN_OLDRINGPBX 0x000d -#define TONE_AMERICAN_RINGING 0x000e -#define TONE_GERMAN_BUSY 0x000f -#define TONE_GERMAN_OLDBUSY 0x0010 -#define TONE_AMERICAN_BUSY 0x0011 -#define TONE_GERMAN_HANGUP 0x0012 -#define TONE_GERMAN_OLDHANGUP 0x0013 -#define TONE_AMERICAN_HANGUP 0x0014 -#define TONE_SPECIAL_INFO 0x0015 -#define TONE_GERMAN_GASSENBESETZT 0x0016 -#define TONE_GERMAN_AUFSCHALTTON 0x0016 - -/* MPH_INFORMATION_IND */ -#define L1_SIGNAL_LOS_OFF 0x0010 -#define L1_SIGNAL_LOS_ON 0x0011 -#define L1_SIGNAL_AIS_OFF 0x0012 -#define L1_SIGNAL_AIS_ON 0x0013 -#define L1_SIGNAL_RDI_OFF 0x0014 -#define L1_SIGNAL_RDI_ON 0x0015 -#define L1_SIGNAL_SLIP_RX 0x0020 -#define L1_SIGNAL_SLIP_TX 0x0021 - -/* - * protocol ids - * D channel 1-31 - * B channel 33 - 63 - */ - -#define ISDN_P_NONE 0 -#define ISDN_P_BASE 0 -#define ISDN_P_TE_S0 0x01 -#define ISDN_P_NT_S0 0x02 -#define ISDN_P_TE_E1 0x03 -#define ISDN_P_NT_E1 0x04 -#define ISDN_P_TE_UP0 0x05 -#define ISDN_P_NT_UP0 
0x06 - -#define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \ - (p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE)) -#define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \ - (p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT)) -#define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0)) -#define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1)) -#define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0)) - - -#define ISDN_P_LAPD_TE 0x10 -#define ISDN_P_LAPD_NT 0x11 - -#define ISDN_P_B_MASK 0x1f -#define ISDN_P_B_START 0x20 - -#define ISDN_P_B_RAW 0x21 -#define ISDN_P_B_HDLC 0x22 -#define ISDN_P_B_X75SLP 0x23 -#define ISDN_P_B_L2DTMF 0x24 -#define ISDN_P_B_L2DSP 0x25 -#define ISDN_P_B_L2DSPHDLC 0x26 -#define ISDN_P_B_T30_FAX 0x27 -#define ISDN_P_B_MODEM_ASYNC 0x28 - -#define OPTION_L2_PMX 1 -#define OPTION_L2_PTP 2 -#define OPTION_L2_FIXEDTEI 3 -#define OPTION_L2_CLEANUP 4 -#define OPTION_L1_HOLD 5 - -/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */ -#define MISDN_MAX_IDLEN 20 - -struct mISDNhead { - unsigned int prim; - unsigned int id; -} __packed; - -#define MISDN_HEADER_LEN sizeof(struct mISDNhead) -#define MAX_DATA_SIZE 2048 -#define MAX_DATA_MEM (MAX_DATA_SIZE + MISDN_HEADER_LEN) -#define MAX_DFRAME_LEN 260 - -#define MISDN_ID_ADDR_MASK 0xFFFF -#define MISDN_ID_TEI_MASK 0xFF00 -#define MISDN_ID_SAPI_MASK 0x00FF -#define MISDN_ID_TEI_ANY 0x7F00 - -#define MISDN_ID_ANY 0xFFFF -#define MISDN_ID_NONE 0xFFFE - -#define GROUP_TEI 127 -#define TEI_SAPI 63 -#define CTRL_SAPI 0 - -#define MISDN_MAX_CHANNEL 127 -#define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) - -#define SOL_MISDN 0 - -struct sockaddr_mISDN { - sa_family_t family; - unsigned char dev; - unsigned char channel; - unsigned char sapi; - unsigned char tei; -}; - -struct mISDNversion { - unsigned char major; - unsigned char minor; - unsigned short release; -}; - -struct mISDN_devinfo { - u_int id; - u_int Dprotocols; - u_int 
Bprotocols; - u_int protocol; - u_char channelmap[MISDN_CHMAP_SIZE]; - u_int nrbchan; - char name[MISDN_MAX_IDLEN]; -}; - -struct mISDN_devrename { - u_int id; - char name[MISDN_MAX_IDLEN]; /* new name */ -}; - -/* MPH_INFORMATION_REQ payload */ -struct ph_info_ch { - __u32 protocol; - __u64 Flags; -}; - -struct ph_info_dch { - struct ph_info_ch ch; - __u16 state; - __u16 num_bch; -}; - -struct ph_info { - struct ph_info_dch dch; - struct ph_info_ch bch[]; -}; - -/* timer device ioctl */ -#define IMADDTIMER _IOR('I', 64, int) -#define IMDELTIMER _IOR('I', 65, int) - -/* socket ioctls */ -#define IMGETVERSION _IOR('I', 66, int) -#define IMGETCOUNT _IOR('I', 67, int) -#define IMGETDEVINFO _IOR('I', 68, int) -#define IMCTRLREQ _IOR('I', 69, int) -#define IMCLEAR_L2 _IOR('I', 70, int) -#define IMSETDEVNAME _IOR('I', 71, struct mISDN_devrename) -#define IMHOLD_L1 _IOR('I', 72, int) - -static inline int -test_channelmap(u_int nr, u_char *map) -{ - if (nr <= MISDN_MAX_CHANNEL) - return map[nr >> 3] & (1 << (nr & 7)); - else - return 0; -} - -static inline void -set_channelmap(u_int nr, u_char *map) -{ - map[nr >> 3] |= (1 << (nr & 7)); -} - -static inline void -clear_channelmap(u_int nr, u_char *map) -{ - map[nr >> 3] &= ~(1 << (nr & 7)); -} - -/* CONTROL_CHANNEL parameters */ -#define MISDN_CTRL_GETOP 0x0000 -#define MISDN_CTRL_LOOP 0x0001 -#define MISDN_CTRL_CONNECT 0x0002 -#define MISDN_CTRL_DISCONNECT 0x0004 -#define MISDN_CTRL_RX_BUFFER 0x0008 -#define MISDN_CTRL_PCMCONNECT 0x0010 -#define MISDN_CTRL_PCMDISCONNECT 0x0020 -#define MISDN_CTRL_SETPEER 0x0040 -#define MISDN_CTRL_UNSETPEER 0x0080 -#define MISDN_CTRL_RX_OFF 0x0100 -#define MISDN_CTRL_FILL_EMPTY 0x0200 -#define MISDN_CTRL_GETPEER 0x0400 -#define MISDN_CTRL_L1_TIMER3 0x0800 -#define MISDN_CTRL_HW_FEATURES_OP 0x2000 -#define MISDN_CTRL_HW_FEATURES 0x2001 -#define MISDN_CTRL_HFC_OP 0x4000 -#define MISDN_CTRL_HFC_PCM_CONN 0x4001 -#define MISDN_CTRL_HFC_PCM_DISC 0x4002 -#define MISDN_CTRL_HFC_CONF_JOIN 0x4003 
-#define MISDN_CTRL_HFC_CONF_SPLIT 0x4004 -#define MISDN_CTRL_HFC_RECEIVE_OFF 0x4005 -#define MISDN_CTRL_HFC_RECEIVE_ON 0x4006 -#define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007 -#define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008 -#define MISDN_CTRL_HFC_WD_INIT 0x4009 -#define MISDN_CTRL_HFC_WD_RESET 0x400A - -/* special RX buffer value for MISDN_CTRL_RX_BUFFER request.p1 is the minimum - * buffer size request.p2 the maximum. Using MISDN_CTRL_RX_SIZE_IGNORE will - * not change the value, but still read back the actual stetting. - */ -#define MISDN_CTRL_RX_SIZE_IGNORE -1 - -/* socket options */ -#define MISDN_TIME_STAMP 0x0001 - -struct mISDN_ctrl_req { - int op; - int channel; - int p1; - int p2; -}; - -/* muxer options */ -#define MISDN_OPT_ALL 1 -#define MISDN_OPT_TEIMGR 2 - -#ifdef __KERNEL__ -#include <linux/list.h> -#include <linux/skbuff.h> -#include <linux/net.h> -#include <net/sock.h> -#include <linux/completion.h> - -#define DEBUG_CORE 0x000000ff -#define DEBUG_CORE_FUNC 0x00000002 -#define DEBUG_SOCKET 0x00000004 -#define DEBUG_MANAGER 0x00000008 -#define DEBUG_SEND_ERR 0x00000010 -#define DEBUG_MSG_THREAD 0x00000020 -#define DEBUG_QUEUE_FUNC 0x00000040 -#define DEBUG_L1 0x0000ff00 -#define DEBUG_L1_FSM 0x00000200 -#define DEBUG_L2 0x00ff0000 -#define DEBUG_L2_FSM 0x00020000 -#define DEBUG_L2_CTRL 0x00040000 -#define DEBUG_L2_RECV 0x00080000 -#define DEBUG_L2_TEI 0x00100000 -#define DEBUG_L2_TEIFSM 0x00200000 -#define DEBUG_TIMER 0x01000000 -#define DEBUG_CLOCK 0x02000000 - -#define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) -#define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) -#define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id) - -/* socket states */ -#define MISDN_OPEN 1 -#define MISDN_BOUND 2 -#define MISDN_CLOSED 3 - -struct mISDNchannel; -struct mISDNdevice; -struct mISDNstack; -struct mISDNclock; - -struct channel_req { - u_int protocol; - struct sockaddr_mISDN adr; - struct mISDNchannel *ch; -}; - -typedef int (ctrl_func_t)(struct 
mISDNchannel *, u_int, void *); -typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *); -typedef int (create_func_t)(struct channel_req *); - -struct Bprotocol { - struct list_head list; - char *name; - u_int Bprotocols; - create_func_t *create; -}; - -struct mISDNchannel { - struct list_head list; - u_int protocol; - u_int nr; - u_long opt; - u_int addr; - struct mISDNstack *st; - struct mISDNchannel *peer; - send_func_t *send; - send_func_t *recv; - ctrl_func_t *ctrl; -}; - -struct mISDN_sock_list { - struct hlist_head head; - rwlock_t lock; -}; - -struct mISDN_sock { - struct sock sk; - struct mISDNchannel ch; - u_int cmask; - struct mISDNdevice *dev; -}; - - - -struct mISDNdevice { - struct mISDNchannel D; - u_int id; - u_int Dprotocols; - u_int Bprotocols; - u_int nrbchan; - u_char channelmap[MISDN_CHMAP_SIZE]; - struct list_head bchannels; - struct mISDNchannel *teimgr; - struct device dev; -}; - -struct mISDNstack { - u_long status; - struct mISDNdevice *dev; - struct task_struct *thread; - struct completion *notify; - wait_queue_head_t workq; - struct sk_buff_head msgq; - struct list_head layer2; - struct mISDNchannel *layer1; - struct mISDNchannel own; - struct mutex lmutex; /* protect lists */ - struct mISDN_sock_list l1sock; -#ifdef MISDN_MSG_STATS - u_int msg_cnt; - u_int sleep_cnt; - u_int stopped_cnt; -#endif -}; - -typedef int (clockctl_func_t)(void *, int); - -struct mISDNclock { - struct list_head list; - char name[64]; - int pri; - clockctl_func_t *ctl; - void *priv; -}; - -/* global alloc/queue functions */ - -static inline struct sk_buff * -mI_alloc_skb(unsigned int len, gfp_t gfp_mask) -{ - struct sk_buff *skb; - - skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); - if (likely(skb)) - skb_reserve(skb, MISDN_HEADER_LEN); - return skb; -} - -static inline struct sk_buff * -_alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) -{ - struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); - struct mISDNhead *hh; - - if 
(!skb) - return NULL; - if (len) - skb_put_data(skb, dp, len); - hh = mISDN_HEAD_P(skb); - hh->prim = prim; - hh->id = id; - return skb; -} - -static inline void -_queue_data(struct mISDNchannel *ch, u_int prim, - u_int id, u_int len, void *dp, gfp_t gfp_mask) -{ - struct sk_buff *skb; - - if (!ch->peer) - return; - skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask); - if (!skb) - return; - if (ch->recv(ch->peer, skb)) - dev_kfree_skb(skb); -} - -/* global register/unregister functions */ - -extern int mISDN_register_device(struct mISDNdevice *, - struct device *parent, char *name); -extern void mISDN_unregister_device(struct mISDNdevice *); -extern int mISDN_register_Bprotocol(struct Bprotocol *); -extern void mISDN_unregister_Bprotocol(struct Bprotocol *); -extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *, - void *); -extern void mISDN_unregister_clock(struct mISDNclock *); - -static inline struct mISDNdevice *dev_to_mISDN(const struct device *dev) -{ - if (dev) - return dev_get_drvdata(dev); - else - return NULL; -} - -extern void set_channel_address(struct mISDNchannel *, u_int, u_int); -extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *); -extern unsigned short mISDN_clock_get(void); -extern const char *mISDNDevName4ch(struct mISDNchannel *); - -#endif /* __KERNEL__ */ -#endif /* mISDNIF_H */ diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index c6eea9afb943..e5997120f45c 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h @@ -45,6 +45,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg); int mbox_flush(struct mbox_chan *chan, unsigned long timeout); void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ +unsigned int mbox_chan_tx_slots_available(struct mbox_chan *chan); /* atomic */ void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ #endif /* 
__MAILBOX_CLIENT_H */ diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h index 80a427c7ca29..dc93287a2a01 100644 --- a/include/linux/mailbox_controller.h +++ b/include/linux/mailbox_controller.h @@ -3,6 +3,7 @@ #ifndef __MAILBOX_CONTROLLER_H #define __MAILBOX_CONTROLLER_H +#include <linux/bits.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/hrtimer.h> @@ -11,6 +12,13 @@ struct mbox_chan; +/* Sentinel value distinguishing "no active request" from "NULL message data" */ +#define MBOX_NO_MSG ((void *)-1) + +#define MBOX_TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */ +#define MBOX_TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */ +#define MBOX_TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */ + /** * struct mbox_chan_ops - methods to control mailbox channels * @send_data: The API asks the MBOX controller driver, in atomic @@ -54,10 +62,10 @@ struct mbox_chan_ops { /** * struct mbox_controller - Controller of a class of communication channels - * @dev: Device backing this controller - * @ops: Operators that work on each communication chan - * @chans: Array of channels - * @num_chans: Number of channels in the 'chans' array. + * @dev: Device backing this controller. Required. + * @ops: Operators that work on each communication chan. Required. + * @chans: Array of channels. Required. + * @num_chans: Number of channels in the 'chans' array. Required. * @txdone_irq: Indicates if the controller can report to API when * the last transmitted data was read by the remote. * Eg, if it has some TX ACK irq. @@ -70,6 +78,7 @@ struct mbox_chan_ops { * @of_xlate: Controller driver specific mapping of channel via DT * @poll_hrt: API private. hrtimer used to poll for TXDONE on all * channels. + * @poll_hrt_lock: API private. Lock protecting access to poll_hrt. * @node: API private. To hook into list of controllers. 
*/ struct mbox_controller { diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index 0c464eade1d6..4a5631906aff 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -4,7 +4,7 @@ /* * Maple Tree - An RCU-safe adaptive tree for storing ranges * Copyright (c) 2018-2022 Oracle - * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com> + * Authors: Liam R. Howlett <liam@infradead.org> * Matthew Wilcox <willy@infradead.org> */ diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index a82755e1fc40..5bdbd9e1d460 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -112,6 +112,7 @@ int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, mei_cldev_cb_t notif_cb); +const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); u8 mei_cldev_ver(const struct mei_cl_device *cldev); size_t mei_cldev_mtu(const struct mei_cl_device *cldev); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 9eac4f268359..b0f750d22a7b 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -173,8 +173,6 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid); -void memblock_free_late(phys_addr_t base, phys_addr_t size); - #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, phys_addr_t *out_start, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5173a9f16721..dc3fa687759b 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -115,6 +115,16 @@ struct mem_cgroup_per_node { unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; struct mem_cgroup_reclaim_iter iter; + /* + * objcg is wiped out as a part of the objcg repaprenting process. 
+ * orig_objcg preserves a pointer (and a reference) to the original + * objcg until the end of live of memcg. + */ + struct obj_cgroup __rcu *objcg; + struct obj_cgroup *orig_objcg; + /* list of inherited objcgs, protected by objcg_lock */ + struct list_head objcg_list; + #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC /* slab stats for nmi context */ atomic_t slab_reclaimable; @@ -179,6 +189,7 @@ struct obj_cgroup { struct list_head list; /* protected by objcg_lock */ struct rcu_head rcu; }; + bool is_root; }; /* @@ -257,15 +268,6 @@ struct mem_cgroup { seqlock_t socket_pressure_seqlock; #endif int kmemcg_id; - /* - * memcg->objcg is wiped out as a part of the objcg repaprenting - * process. memcg->orig_objcg preserves a pointer (and a reference) - * to the original objcg until the end of live of memcg. - */ - struct obj_cgroup __rcu *objcg; - struct obj_cgroup *orig_objcg; - /* list of inherited objcgs, protected by objcg_lock */ - struct list_head objcg_list; struct memcg_vmstats_percpu __percpu *vmstats_percpu; @@ -367,9 +369,6 @@ enum objext_flags { #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1) #ifdef CONFIG_MEMCG - -static inline bool folio_memcg_kmem(struct folio *folio); - /* * After the initialization objcg->memcg is always pointing at * a valid memcg, but can be atomically swapped to the parent memcg. @@ -383,43 +382,19 @@ static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) } /* - * __folio_memcg - Get the memory cgroup associated with a non-kmem folio - * @folio: Pointer to the folio. - * - * Returns a pointer to the memory cgroup associated with the folio, - * or NULL. This function assumes that the folio is known to have a - * proper memory cgroup pointer. It's not safe to call this function - * against some type of folios, e.g. slab folios or ex-slab folios or - * kmem folios. 
- */ -static inline struct mem_cgroup *__folio_memcg(struct folio *folio) -{ - unsigned long memcg_data = folio->memcg_data; - - VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); - VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); - VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio); - - return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); -} - -/* - * __folio_objcg - get the object cgroup associated with a kmem folio. + * folio_objcg - get the object cgroup associated with a folio. * @folio: Pointer to the folio. * * Returns a pointer to the object cgroup associated with the folio, * or NULL. This function assumes that the folio is known to have a - * proper object cgroup pointer. It's not safe to call this function - * against some type of folios, e.g. slab folios or ex-slab folios or - * LRU folios. + * proper object cgroup pointer. */ -static inline struct obj_cgroup *__folio_objcg(struct folio *folio) +static inline struct obj_cgroup *folio_objcg(struct folio *folio) { unsigned long memcg_data = folio->memcg_data; VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); - VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio); return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } @@ -433,21 +408,30 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) * proper memory cgroup pointer. It's not safe to call this function * against some type of folios, e.g. slab folios or ex-slab folios. * - * For a non-kmem folio any of the following ensures folio and memcg binding - * stability: + * For a folio any of the following ensures folio and objcg binding stability: * * - the folio lock * - LRU isolation * - exclusive reference * - * For a kmem folio a caller should hold an rcu read lock to protect memcg - * associated with a kmem folio from being released. 
+ * Based on the stable binding of folio and objcg, for a folio any of the + * following ensures folio and memcg binding stability: + * + * - cgroup_mutex + * - the lruvec lock + * + * If the caller only want to ensure that the page counters of memcg are + * updated correctly, ensure that the binding stability of folio and objcg + * is sufficient. + * + * Note: The caller should hold an rcu read lock or cgroup_mutex to protect + * memcg associated with a folio from being released. */ static inline struct mem_cgroup *folio_memcg(struct folio *folio) { - if (folio_memcg_kmem(folio)) - return obj_cgroup_memcg(__folio_objcg(folio)); - return __folio_memcg(folio); + struct obj_cgroup *objcg = folio_objcg(folio); + + return objcg ? obj_cgroup_memcg(objcg) : NULL; } /* @@ -471,15 +455,10 @@ static inline bool folio_memcg_charged(struct folio *folio) * has an associated memory cgroup pointer or an object cgroups vector or * an object cgroup. * - * For a non-kmem folio any of the following ensures folio and memcg binding - * stability: + * The page and objcg or memcg binding rules can refer to folio_memcg(). * - * - the folio lock - * - LRU isolation - * - exclusive reference - * - * For a kmem folio a caller should hold an rcu read lock to protect memcg - * associated with a kmem folio from being released. + * A caller should hold an rcu read lock to protect memcg associated with a + * page from being released. */ static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) { @@ -488,18 +467,14 @@ static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) * for slabs, READ_ONCE() should be used here. 
*/ unsigned long memcg_data = READ_ONCE(folio->memcg_data); + struct obj_cgroup *objcg; if (memcg_data & MEMCG_DATA_OBJEXTS) return NULL; - if (memcg_data & MEMCG_DATA_KMEM) { - struct obj_cgroup *objcg; + objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); - objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); - return obj_cgroup_memcg(objcg); - } - - return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); + return objcg ? obj_cgroup_memcg(objcg) : NULL; } static inline struct mem_cgroup *page_memcg_check(struct page *page) @@ -548,6 +523,11 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) return (memcg == root_mem_cgroup); } +static inline bool obj_cgroup_is_root(const struct obj_cgroup *objcg) +{ + return objcg->is_root; +} + static inline bool mem_cgroup_disabled(void) { return !cgroup_subsys_enabled(memory_cgrp_subsys); @@ -735,7 +715,15 @@ out: * folio_lruvec - return lruvec for isolating/putting an LRU folio * @folio: Pointer to the folio. * - * This function relies on folio->mem_cgroup being stable. + * Call with rcu_read_lock() held to ensure the lifetime of the returned lruvec. + * Note that this alone will NOT guarantee the stability of the folio->lruvec + * association; the folio can be reparented to an ancestor if this races with + * cgroup deletion. + * + * Use folio_lruvec_lock() to ensure both lifetime and stability of the binding. + * Once a lruvec is locked, folio_lruvec() can be called on other folios, and + * their binding is stable if the returned lruvec matches the one the caller has + * locked. Useful for lock batching. 
*/ static inline struct lruvec *folio_lruvec(struct folio *folio) { @@ -758,15 +746,6 @@ struct lruvec *folio_lruvec_lock_irq(struct folio *folio); struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, unsigned long *flags); -#ifdef CONFIG_DEBUG_VM -void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio); -#else -static inline -void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) -{ -} -#endif - static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ? container_of(css, struct mem_cgroup, css) : NULL; @@ -774,23 +753,26 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg) { + if (obj_cgroup_is_root(objcg)) + return true; return percpu_ref_tryget(&objcg->refcnt); } -static inline void obj_cgroup_get(struct obj_cgroup *objcg) +static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, + unsigned long nr) { - percpu_ref_get(&objcg->refcnt); + if (!obj_cgroup_is_root(objcg)) + percpu_ref_get_many(&objcg->refcnt, nr); } -static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, - unsigned long nr) +static inline void obj_cgroup_get(struct obj_cgroup *objcg) { - percpu_ref_get_many(&objcg->refcnt, nr); + obj_cgroup_get_many(objcg, 1); } static inline void obj_cgroup_put(struct obj_cgroup *objcg) { - if (objcg) + if (objcg && !obj_cgroup_is_root(objcg)) percpu_ref_put(&objcg->refcnt); } @@ -885,7 +867,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, return match; } -struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio); +struct cgroup_subsys_state *get_mem_cgroup_css_from_folio(struct folio *folio); ino_t page_cgroup_ino(struct page *page); static inline bool mem_cgroup_online(struct mem_cgroup *memcg) @@ -896,7 +878,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) } void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, - int 
zid, int nr_pages); + int zid, long nr_pages); static inline unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, @@ -966,10 +948,15 @@ void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, static inline void count_memcg_folio_events(struct folio *folio, enum vm_event_item idx, unsigned long nr) { - struct mem_cgroup *memcg = folio_memcg(folio); + struct mem_cgroup *memcg; - if (memcg) - count_memcg_events(memcg, idx, nr); + if (!folio_memcg_charged(folio)) + return; + + rcu_read_lock(); + memcg = folio_memcg(folio); + count_memcg_events(memcg, idx, nr); + rcu_read_unlock(); } static inline void count_memcg_events_mm(struct mm_struct *mm, @@ -1087,6 +1074,11 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) return true; } +static inline bool obj_cgroup_is_root(const struct obj_cgroup *objcg) +{ + return true; +} + static inline bool mem_cgroup_disabled(void) { return true; @@ -1179,11 +1171,6 @@ static inline struct lruvec *folio_lruvec(struct folio *folio) return &pgdat->__lruvec; } -static inline -void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) -{ -} - static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { return NULL; @@ -1242,6 +1229,7 @@ static inline struct lruvec *folio_lruvec_lock(struct folio *folio) { struct pglist_data *pgdat = folio_pgdat(folio); + rcu_read_lock(); spin_lock(&pgdat->__lruvec.lru_lock); return &pgdat->__lruvec; } @@ -1250,6 +1238,7 @@ static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio) { struct pglist_data *pgdat = folio_pgdat(folio); + rcu_read_lock(); spin_lock_irq(&pgdat->__lruvec.lru_lock); return &pgdat->__lruvec; } @@ -1259,6 +1248,7 @@ static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, { struct pglist_data *pgdat = folio_pgdat(folio); + rcu_read_lock(); spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp); return &pgdat->__lruvec; } @@ -1479,20 +1469,28 @@ static inline struct lruvec 
*parent_lruvec(struct lruvec *lruvec) return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); } -static inline void unlock_page_lruvec(struct lruvec *lruvec) +static inline void lruvec_lock_irq(struct lruvec *lruvec) +{ + rcu_read_lock(); + spin_lock_irq(&lruvec->lru_lock); +} + +static inline void lruvec_unlock(struct lruvec *lruvec) { spin_unlock(&lruvec->lru_lock); + rcu_read_unlock(); } -static inline void unlock_page_lruvec_irq(struct lruvec *lruvec) +static inline void lruvec_unlock_irq(struct lruvec *lruvec) { spin_unlock_irq(&lruvec->lru_lock); + rcu_read_unlock(); } -static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, - unsigned long flags) +static inline void lruvec_unlock_irqrestore(struct lruvec *lruvec, unsigned long flags) { spin_unlock_irqrestore(&lruvec->lru_lock, flags); + rcu_read_unlock(); } /* Test requires a stable folio->memcg binding, see folio_memcg() */ @@ -1511,7 +1509,7 @@ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio, if (folio_matches_lruvec(folio, locked_lruvec)) return locked_lruvec; - unlock_page_lruvec_irq(locked_lruvec); + lruvec_unlock_irq(locked_lruvec); } return folio_lruvec_lock_irq(folio); @@ -1525,7 +1523,7 @@ static inline void folio_lruvec_relock_irqsave(struct folio *folio, if (folio_matches_lruvec(folio, *lruvecp)) return; - unlock_page_lruvec_irqrestore(*lruvecp, *flags); + lruvec_unlock_irqrestore(*lruvecp, *flags); } *lruvecp = folio_lruvec_lock_irqsave(folio, flags); @@ -1549,9 +1547,14 @@ static inline void mem_cgroup_track_foreign_dirty(struct folio *folio, if (mem_cgroup_disabled()) return; + if (!folio_memcg_charged(folio)) + return; + + rcu_read_lock(); memcg = folio_memcg(folio); - if (unlikely(memcg && &memcg->css != wb->memcg_css)) + if (unlikely(&memcg->css != wb->memcg_css)) mem_cgroup_track_foreign_dirty_slowpath(folio, wb); + rcu_read_unlock(); } void mem_cgroup_flush_foreign(struct bdi_writeback *wb); diff --git a/include/linux/mfd/atmel-hlcdc.h 
b/include/linux/mfd/atmel-hlcdc.h index 80d675a03b39..07c2081867fd 100644 --- a/include/linux/mfd/atmel-hlcdc.h +++ b/include/linux/mfd/atmel-hlcdc.h @@ -75,6 +75,7 @@ */ struct atmel_hlcdc { struct regmap *regmap; + struct clk *lvds_pll_clk; struct clk *periph_clk; struct clk *sys_clk; struct clk *slow_clk; diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h index f70a810c55f7..d2e17ab1dbfc 100644 --- a/include/linux/mfd/bcm2835-pm.h +++ b/include/linux/mfd/bcm2835-pm.h @@ -5,11 +5,18 @@ #include <linux/regmap.h> +enum bcm2835_soc { + BCM2835_PM_SOC_BCM2835, + BCM2835_PM_SOC_BCM2711, + BCM2835_PM_SOC_BCM2712, +}; + struct bcm2835_pm { struct device *dev; void __iomem *base; void __iomem *asb; void __iomem *rpivid_asb; + enum bcm2835_soc soc; }; #endif /* BCM2835_MFD_PM_H */ diff --git a/include/linux/mfd/cgbc.h b/include/linux/mfd/cgbc.h index badbec4c7033..91f501e76c8f 100644 --- a/include/linux/mfd/cgbc.h +++ b/include/linux/mfd/cgbc.h @@ -26,8 +26,8 @@ struct cgbc_version { * @io_cmd: Pointer to the command IO memory * @session: Session id returned by the Board Controller * @dev: Pointer to kernel device structure - * @cgbc_version: Board Controller version structure - * @mutex: Board Controller mutex + * @version: Board Controller version structure + * @lock: Board Controller mutex */ struct cgbc_device_data { void __iomem *io_session; diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h index 2dbd80abfd1d..5d75071eaaea 100644 --- a/include/linux/mfd/kempld.h +++ b/include/linux/mfd/kempld.h @@ -98,10 +98,10 @@ struct kempld_device_data { /** * struct kempld_platform_data - PLD hardware configuration structure * @pld_clock: PLD clock frequency - * @gpio_base GPIO base pin number + * @gpio_base: GPIO base pin number * @ioresource: IO addresses of the PLD - * @get_mutex: PLD specific get_mutex callback - * @release_mutex: PLD specific release_mutex callback + * @get_hardware_mutex: PLD specific get_mutex callback + * 
@release_hardware_mutex: PLD specific release_mutex callback * @get_info: PLD specific get_info callback * @register_cells: PLD specific register_cells callback */ diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h index 1fbda1f8967d..1819aa743c5c 100644 --- a/include/linux/mfd/lpc_ich.h +++ b/include/linux/mfd/lpc_ich.h @@ -37,4 +37,6 @@ struct lpc_ich_info { u8 use_gpio; }; +extern const struct software_node lpc_ich_gpio_swnode; + #endif diff --git a/include/linux/mfd/max77759.h b/include/linux/mfd/max77759.h index c6face34e385..ec19be952877 100644 --- a/include/linux/mfd/max77759.h +++ b/include/linux/mfd/max77759.h @@ -59,35 +59,65 @@ #define MAX77759_MAXQ_REG_AP_DATAIN0 0xb1 #define MAX77759_MAXQ_REG_UIC_SWRST 0xe0 -#define MAX77759_CHGR_REG_CHG_INT 0xb0 -#define MAX77759_CHGR_REG_CHG_INT2 0xb1 -#define MAX77759_CHGR_REG_CHG_INT_MASK 0xb2 -#define MAX77759_CHGR_REG_CHG_INT2_MASK 0xb3 -#define MAX77759_CHGR_REG_CHG_INT_OK 0xb4 -#define MAX77759_CHGR_REG_CHG_DETAILS_00 0xb5 -#define MAX77759_CHGR_REG_CHG_DETAILS_01 0xb6 -#define MAX77759_CHGR_REG_CHG_DETAILS_02 0xb7 -#define MAX77759_CHGR_REG_CHG_DETAILS_03 0xb8 -#define MAX77759_CHGR_REG_CHG_CNFG_00 0xb9 -#define MAX77759_CHGR_REG_CHG_CNFG_01 0xba -#define MAX77759_CHGR_REG_CHG_CNFG_02 0xbb -#define MAX77759_CHGR_REG_CHG_CNFG_03 0xbc -#define MAX77759_CHGR_REG_CHG_CNFG_04 0xbd -#define MAX77759_CHGR_REG_CHG_CNFG_05 0xbe -#define MAX77759_CHGR_REG_CHG_CNFG_06 0xbf -#define MAX77759_CHGR_REG_CHG_CNFG_07 0xc0 -#define MAX77759_CHGR_REG_CHG_CNFG_08 0xc1 -#define MAX77759_CHGR_REG_CHG_CNFG_09 0xc2 -#define MAX77759_CHGR_REG_CHG_CNFG_10 0xc3 -#define MAX77759_CHGR_REG_CHG_CNFG_11 0xc4 -#define MAX77759_CHGR_REG_CHG_CNFG_12 0xc5 -#define MAX77759_CHGR_REG_CHG_CNFG_13 0xc6 -#define MAX77759_CHGR_REG_CHG_CNFG_14 0xc7 -#define MAX77759_CHGR_REG_CHG_CNFG_15 0xc8 -#define MAX77759_CHGR_REG_CHG_CNFG_16 0xc9 -#define MAX77759_CHGR_REG_CHG_CNFG_17 0xca -#define MAX77759_CHGR_REG_CHG_CNFG_18 0xcb -#define 
MAX77759_CHGR_REG_CHG_CNFG_19 0xcc +#define MAX77759_CHGR_REG_CHG_INT 0xb0 +#define MAX77759_CHGR_REG_CHG_INT_AICL BIT(7) +#define MAX77759_CHGR_REG_CHG_INT_CHGIN BIT(6) +#define MAX77759_CHGR_REG_CHG_INT_WCIN BIT(5) +#define MAX77759_CHGR_REG_CHG_INT_CHG BIT(4) +#define MAX77759_CHGR_REG_CHG_INT_BAT BIT(3) +#define MAX77759_CHGR_REG_CHG_INT_INLIM BIT(2) +#define MAX77759_CHGR_REG_CHG_INT_THM2 BIT(1) +#define MAX77759_CHGR_REG_CHG_INT_BYP BIT(0) +#define MAX77759_CHGR_REG_CHG_INT2 0xb1 +#define MAX77759_CHGR_REG_CHG_INT2_INSEL BIT(7) +#define MAX77759_CHGR_REG_CHG_INT2_SYS_UVLO1 BIT(6) +#define MAX77759_CHGR_REG_CHG_INT2_SYS_UVLO2 BIT(5) +#define MAX77759_CHGR_REG_CHG_INT2_BAT_OILO BIT(4) +#define MAX77759_CHGR_REG_CHG_INT2_CHG_STA_CC BIT(3) +#define MAX77759_CHGR_REG_CHG_INT2_CHG_STA_CV BIT(2) +#define MAX77759_CHGR_REG_CHG_INT2_CHG_STA_TO BIT(1) +#define MAX77759_CHGR_REG_CHG_INT2_CHG_STA_DONE BIT(0) +#define MAX77759_CHGR_REG_CHG_INT_MASK 0xb2 +#define MAX77759_CHGR_REG_CHG_INT2_MASK 0xb3 +#define MAX77759_CHGR_REG_CHG_INT_OK 0xb4 +#define MAX77759_CHGR_REG_CHG_DETAILS_00 0xb5 +#define MAX77759_CHGR_REG_CHG_DETAILS_00_CHGIN_DTLS GENMASK(6, 5) +#define MAX77759_CHGR_REG_CHG_DETAILS_01 0xb6 +#define MAX77759_CHGR_REG_CHG_DETAILS_01_BAT_DTLS GENMASK(6, 4) +#define MAX77759_CHGR_REG_CHG_DETAILS_01_CHG_DTLS GENMASK(3, 0) +#define MAX77759_CHGR_REG_CHG_DETAILS_02 0xb7 +#define MAX77759_CHGR_REG_CHG_DETAILS_02_CHGIN_STS BIT(5) +#define MAX77759_CHGR_REG_CHG_DETAILS_03 0xb8 +#define MAX77759_CHGR_REG_CHG_CNFG_00 0xb9 +#define MAX77759_CHGR_REG_CHG_CNFG_00_MODE GENMASK(3, 0) +#define MAX77759_CHGR_REG_CHG_CNFG_01 0xba +#define MAX77759_CHGR_REG_CHG_CNFG_02 0xbb +#define MAX77759_CHGR_REG_CHG_CNFG_02_CHGCC GENMASK(5, 0) +#define MAX77759_CHGR_REG_CHG_CNFG_03 0xbc +#define MAX77759_CHGR_REG_CHG_CNFG_04 0xbd +#define MAX77759_CHGR_REG_CHG_CNFG_04_CHG_CV_PRM GENMASK(5, 0) +#define MAX77759_CHGR_REG_CHG_CNFG_05 0xbe +#define MAX77759_CHGR_REG_CHG_CNFG_06 0xbf +#define 
MAX77759_CHGR_REG_CHG_CNFG_06_CHGPROT GENMASK(3, 2) +#define MAX77759_CHGR_REG_CHG_CNFG_07 0xc0 +#define MAX77759_CHGR_REG_CHG_CNFG_08 0xc1 +#define MAX77759_CHGR_REG_CHG_CNFG_09 0xc2 +#define MAX77759_CHGR_REG_CHG_CNFG_09_CHGIN_ILIM GENMASK(6, 0) +#define MAX77759_CHGR_REG_CHG_CNFG_10 0xc3 +#define MAX77759_CHGR_REG_CHG_CNFG_11 0xc4 +#define MAX77759_CHGR_REG_CHG_CNFG_12 0xc5 +/* Wireless Charging input channel select */ +#define MAX77759_CHGR_REG_CHG_CNFG_12_WCINSEL BIT(6) +/* CHGIN/USB input channel select */ +#define MAX77759_CHGR_REG_CHG_CNFG_12_CHGINSEL BIT(5) +#define MAX77759_CHGR_REG_CHG_CNFG_13 0xc6 +#define MAX77759_CHGR_REG_CHG_CNFG_14 0xc7 +#define MAX77759_CHGR_REG_CHG_CNFG_15 0xc8 +#define MAX77759_CHGR_REG_CHG_CNFG_16 0xc9 +#define MAX77759_CHGR_REG_CHG_CNFG_17 0xca +#define MAX77759_CHGR_REG_CHG_CNFG_18 0xcb +#define MAX77759_CHGR_REG_CHG_CNFG_18_WDTEN BIT(0) +#define MAX77759_CHGR_REG_CHG_CNFG_19 0xcc /* MaxQ opcodes for max77759_maxq_command() */ #define MAX77759_MAXQ_OPCODE_MAXLENGTH (MAX77759_MAXQ_REG_AP_DATAOUT32 - \ @@ -102,6 +132,84 @@ #define MAX77759_MAXQ_OPCODE_USER_SPACE_WRITE 0x82 /** + * enum max77759_chgr_chgin_dtls_status - Charger Input Status + * @MAX77759_CHGR_CHGIN_DTLS_VBUS_UNDERVOLTAGE: + * Charger input voltage (Vchgin) < Under Voltage Threshold (Vuvlo) + * @MAX77759_CHGR_CHGIN_DTLS_VBUS_MARGINAL_VOLTAGE: + * Vchgin > Vuvlo and Vchgin < (Battery Voltage (Vbatt) + system voltage (Vsys)) + * @MAX77759_CHGR_CHGIN_DTLS_VBUS_OVERVOLTAGE: + * Vchgin > Over Voltage threshold (Vovlo) + * @MAX77759_CHGR_CHGIN_DTLS_VBUS_VALID: + * Vchgin > Vuvlo, Vchgin < Vovlo and Vchgin > (Vsys + Vbatt) + */ +enum max77759_chgr_chgin_dtls_status { + MAX77759_CHGR_CHGIN_DTLS_VBUS_UNDERVOLTAGE, + MAX77759_CHGR_CHGIN_DTLS_VBUS_MARGINAL_VOLTAGE, + MAX77759_CHGR_CHGIN_DTLS_VBUS_OVERVOLTAGE, + MAX77759_CHGR_CHGIN_DTLS_VBUS_VALID, +}; + +/** + * enum max77759_chgr_bat_dtls_states - Battery Details + * @MAX77759_CHGR_BAT_DTLS_NO_BATT_CHG_SUSP: No battery and 
the charger suspended + * @MAX77759_CHGR_BAT_DTLS_DEAD_BATTERY: Vbatt < Vtrickle + * @MAX77759_CHGR_BAT_DTLS_BAT_CHG_TIMER_FAULT: Charging suspended due to timer fault + * @MAX77759_CHGR_BAT_DTLS_BAT_OKAY: Battery okay and Vbatt > Min Sys Voltage (Vsysmin) + * @MAX77759_CHGR_BAT_DTLS_BAT_UNDERVOLTAGE: Battery is okay. Vtrickle < Vbatt < Vsysmin + * @MAX77759_CHGR_BAT_DTLS_BAT_OVERVOLTAGE: Battery voltage > Overvoltage threshold + * @MAX77759_CHGR_BAT_DTLS_BAT_OVERCURRENT: Battery current exceeds overcurrent threshold + * @MAX77759_CHGR_BAT_DTLS_BAT_ONLY_MODE: Battery only mode and battery level not available + */ +enum max77759_chgr_bat_dtls_states { + MAX77759_CHGR_BAT_DTLS_NO_BATT_CHG_SUSP, + MAX77759_CHGR_BAT_DTLS_DEAD_BATTERY, + MAX77759_CHGR_BAT_DTLS_BAT_CHG_TIMER_FAULT, + MAX77759_CHGR_BAT_DTLS_BAT_OKAY, + MAX77759_CHGR_BAT_DTLS_BAT_UNDERVOLTAGE, + MAX77759_CHGR_BAT_DTLS_BAT_OVERVOLTAGE, + MAX77759_CHGR_BAT_DTLS_BAT_OVERCURRENT, + MAX77759_CHGR_BAT_DTLS_BAT_ONLY_MODE, +}; + +/** + * enum max77759_chgr_chg_dtls_states - Charger Details + * @MAX77759_CHGR_CHG_DTLS_PREQUAL: Charger in prequalification mode + * @MAX77759_CHGR_CHG_DTLS_CC: Charger in fast charge const curr mode + * @MAX77759_CHGR_CHG_DTLS_CV: Charger in fast charge const voltage mode + * @MAX77759_CHGR_CHG_DTLS_TO: Charger is in top off mode + * @MAX77759_CHGR_CHG_DTLS_DONE: Charger is done + * @MAX77759_CHGR_CHG_DTLS_RSVD_1: Reserved + * @MAX77759_CHGR_CHG_DTLS_TIMER_FAULT: Charger is in timer fault mode + * @MAX77759_CHGR_CHG_DTLS_SUSP_BATT_THM: Charger is suspended as battery removal detected + * @MAX77759_CHGR_CHG_DTLS_OFF: Charger is off. 
Input invalid or charger disabled + * @MAX77759_CHGR_CHG_DTLS_RSVD_2: Reserved + * @MAX77759_CHGR_CHG_DTLS_RSVD_3: Reserved + * @MAX77759_CHGR_CHG_DTLS_OFF_WDOG_TIMER: Charger is off as watchdog timer expired + * @MAX77759_CHGR_CHG_DTLS_SUSP_JEITA: Charger is in JEITA control mode + */ +enum max77759_chgr_chg_dtls_states { + MAX77759_CHGR_CHG_DTLS_PREQUAL, + MAX77759_CHGR_CHG_DTLS_CC, + MAX77759_CHGR_CHG_DTLS_CV, + MAX77759_CHGR_CHG_DTLS_TO, + MAX77759_CHGR_CHG_DTLS_DONE, + MAX77759_CHGR_CHG_DTLS_RSVD_1, + MAX77759_CHGR_CHG_DTLS_TIMER_FAULT, + MAX77759_CHGR_CHG_DTLS_SUSP_BATT_THM, + MAX77759_CHGR_CHG_DTLS_OFF, + MAX77759_CHGR_CHG_DTLS_RSVD_2, + MAX77759_CHGR_CHG_DTLS_RSVD_3, + MAX77759_CHGR_CHG_DTLS_OFF_WDOG_TIMER, + MAX77759_CHGR_CHG_DTLS_SUSP_JEITA, +}; + +enum max77759_chgr_mode { + MAX77759_CHGR_MODE_OFF, + MAX77759_CHGR_MODE_CHG_BUCK_ON = 0x5, + MAX77759_CHGR_MODE_OTG_BOOST_ON = 0xA, +}; + +/** * struct max77759 - core max77759 internal data structure * * @regmap_top: Regmap for accessing TOP registers diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index b774c3a4bb62..340fc72e22aa 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -12,9 +12,9 @@ enum chip_id { MT6323_CHIP_ID = 0x23, - MT6328_CHIP_ID = 0x30, - MT6331_CHIP_ID = 0x20, - MT6332_CHIP_ID = 0x20, + MT6328_CHIP_ID = 0x28, + MT6331_CHIP_ID = 0x31, + MT6332_CHIP_ID = 0x32, MT6357_CHIP_ID = 0x57, MT6358_CHIP_ID = 0x58, MT6359_CHIP_ID = 0x59, diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h index 0379aa207428..2f27386a7122 100644 --- a/include/linux/mfd/rsmu.h +++ b/include/linux/mfd/rsmu.h @@ -19,7 +19,6 @@ enum rsmu_type { }; /** - * * struct rsmu_ddata - device data structure for sub devices. * * @dev: i2c/spi device. 
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h index dd95c37ca134..e913b2cdf77d 100644 --- a/include/linux/mfd/si476x-core.h +++ b/include/linux/mfd/si476x-core.h @@ -77,6 +77,7 @@ enum si476x_power_state { * underlying "core" device which all the MFD cell-devices use. * * @client: Actual I2C client used to transfer commands to the chip. + * @regmap: Regmap for accessing the device registers * @chip_id: Last digit of the chip model(E.g. "1" for SI4761) * @cells: MFD cell devices created by this driver. * @cmd_lock: Mutex used to serialize all the requests to the core @@ -100,16 +101,18 @@ enum si476x_power_state { * @stc: Similar to @cts, but for the STC bit of the status value. * @power_up_parameters: Parameters used as argument for POWER_UP * command when the device is started. - * @state: Current power state of the device. - * @supplues: Structure containing handles to all power supplies used + * @power_state: Current power state of the device. + * @supplies: Structure containing handles to all power supplies used * by the device (NULL ones are ignored). * @gpio_reset: GPIO pin connectet to the RSTB pin of the chip. * @pinmux: Chip's configurable pins configuration. * @diversity_mode: Chips role when functioning in diversity mode. + * @is_alive: Chip is initialized and active. * @status_monitor: Polling worker used in polling use case scenarion * (when IRQ is not avalible). * @revision: Chip's running firmware revision number(Used for correct * command set support). + * @rds_fifo_depth: RDS FIFO size: 20 for IRQ mode or 5 for polling mode. */ struct si476x_core { @@ -166,6 +169,7 @@ static inline struct si476x_core *i2c_mfd_cell_to_core(struct device *dev) /** * si476x_core_lock() - lock the core device to get an exclusive access * to it. 
+ * @core: Core device structure */ static inline void si476x_core_lock(struct si476x_core *core) { @@ -175,6 +179,7 @@ static inline void si476x_core_lock(struct si476x_core *core) /** * si476x_core_unlock() - unlock the core device to relinquish an * exclusive access to it. + * @core: Core device structure */ static inline void si476x_core_unlock(struct si476x_core *core) { @@ -246,9 +251,10 @@ static inline int si476x_to_v4l2(struct si476x_core *core, u16 freq) * struct si476x_func_info - structure containing result of the * FUNC_INFO command. * + * @firmware: Firmware version numbers. * @firmware.major: Firmware major number. * @firmware.minor[...]: Firmware minor numbers. - * @patch_id: + * @patch_id: Firmware patch level. * @func: Mode tuner is working in. */ struct si476x_func_info { @@ -318,8 +324,9 @@ enum si476x_smoothmetrics { * @tp: Current channel's TP flag. * @pty: Current channel's PTY code. * @pi: Current channel's PI code. - * @rdsfifoused: Number of blocks remaining in the RDS FIFO (0 if - * empty). + * @rdsfifoused: Number of blocks remaining in the RDS FIFO (0 if empty). + * @ble: + * @rds: RDS data descriptor */ struct si476x_rds_status_report { bool rdstpptyint, rdspiint, rdssyncint, rdsfifoint; diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 88ccb3e14f48..fb3ba639f4f8 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -86,16 +86,32 @@ enum mhi_ch_type { }; /** + * struct mhi_buf - MHI Buffer description + * @buf: Virtual address of the buffer + * @name: Buffer label. 
For offload channel, configurations name must be: + * ECA - Event context array data + * CCA - Channel context array data + * @dma_addr: IOMMU address of the buffer + * @len: # of bytes + */ +struct mhi_buf { + void *buf; + const char *name; + dma_addr_t dma_addr; + size_t len; +}; + +/** * struct image_info - Firmware and RDDM table * @mhi_buf: Buffer for firmware and RDDM table * @entries: # of entries in table */ struct image_info { - struct mhi_buf *mhi_buf; /* private: from internal.h */ struct bhi_vec_entry *bhi_vec; /* public: */ u32 entries; + struct mhi_buf mhi_buf[] __counted_by(entries); }; /** @@ -489,22 +505,6 @@ struct mhi_result { }; /** - * struct mhi_buf - MHI Buffer description - * @buf: Virtual address of the buffer - * @name: Buffer label. For offload channel, configurations name must be: - * ECA - Event context array data - * CCA - Channel context array data - * @dma_addr: IOMMU address of the buffer - * @len: # of bytes - */ -struct mhi_buf { - void *buf; - const char *name; - dma_addr_t dma_addr; - size_t len; -}; - -/** * struct mhi_driver - Structure representing a MHI client driver * @probe: CB function for client driver probe function * @remove: CB function for client driver remove function diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index e1ded9cf0f70..04b96c5abb57 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -705,23 +705,12 @@ struct mlx5_st; enum { MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, - MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, -}; - -enum { - MKEY_CACHE_LAST_STD_ENTRY = 20, - MLX5_IMR_KSM_CACHE_ENTRY, - MAX_MKEY_CACHE_ENTRIES }; struct mlx5_profile { u64 mask; u8 log_max_qp; u8 num_cmd_caches; - struct { - int size; - int limit; - } mr_cache[MAX_MKEY_CACHE_ENTRIES]; }; struct mlx5_hca_cap { diff --git a/include/linux/mm.h b/include/linux/mm.h index 8260e28205e9..af23453e9dbd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -758,6 +758,8 @@ struct vm_fault { */ }; 
+struct vm_uffd_ops; + /* * These are the virtual MM functions - opening of an area, closing and * unmapping it (needed to keep files on disk up-to-date etc), pointer @@ -865,6 +867,9 @@ struct vm_operations_struct { struct page *(*find_normal_page)(struct vm_area_struct *vma, unsigned long addr); #endif /* CONFIG_FIND_NORMAL_PAGE */ +#ifdef CONFIG_USERFAULTFD + const struct vm_uffd_ops *uffd_ops; +#endif }; #ifdef CONFIG_NUMA_BALANCING @@ -3928,9 +3933,6 @@ extern unsigned long free_reserved_area(void *start, void *end, extern void adjust_managed_page_count(struct page *page, long count); -extern void reserve_bootmem_region(phys_addr_t start, - phys_addr_t end, int nid); - /* Free the reserved page into the buddy system, so it gets managed. */ void free_reserved_page(struct page *page); @@ -4389,7 +4391,7 @@ static inline void mmap_action_map_kernel_pages_full(struct vm_area_desc *desc, int mmap_action_prepare(struct vm_area_desc *desc); int mmap_action_complete(struct vm_area_struct *vma, - struct mmap_action *action); + struct mmap_action *action, bool is_compat); /* Look up the first VMA which exactly match the interval vm_start ... 
vm_end */ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 7fc2ced00f8f..a171070e15f0 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -348,6 +348,8 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); + VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); + if (lru_gen_add_folio(lruvec, folio, false)) return; @@ -362,6 +364,8 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); + VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); + if (lru_gen_add_folio(lruvec, folio, true)) return; @@ -376,6 +380,8 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); + VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); + if (lru_gen_del_folio(lruvec, folio, false)) return; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3bcdda226a91..9adb2ad21da5 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -694,6 +694,9 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg); void lru_gen_offline_memcg(struct mem_cgroup *memcg); void lru_gen_release_memcg(struct mem_cgroup *memcg); void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid); +void max_lru_gen_memcg(struct mem_cgroup *memcg, int nid); +bool recheck_lru_gen_max_memcg(struct mem_cgroup *memcg, int nid); +void lru_gen_reparent_memcg(struct mem_cgroup *memcg, struct mem_cgroup *parent, int nid); #else /* !CONFIG_LRU_GEN */ @@ -735,6 +738,20 @@ static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) { } +static inline void max_lru_gen_memcg(struct mem_cgroup *memcg, int nid) +{ +} + +static inline bool recheck_lru_gen_max_memcg(struct mem_cgroup *memcg, int nid) +{ + return true; +} + +static inline +void 
lru_gen_reparent_memcg(struct mem_cgroup *memcg, struct mem_cgroup *parent, int nid) +{ +} + #endif /* CONFIG_LRU_GEN */ struct lruvec { @@ -2053,21 +2070,16 @@ static inline struct mem_section *__nr_to_section(unsigned long nr) extern size_t mem_section_usage_size(void); /* - * We use the lower bits of the mem_map pointer to store - * a little bit of information. The pointer is calculated - * as mem_map - section_nr_to_pfn(pnum). The result is - * aligned to the minimum alignment of the two values: - * 1. All mem_map arrays are page-aligned. - * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT - * lowest bits. PFN_SECTION_SHIFT is arch-specific - * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the - * worst combination is powerpc with 256k pages, - * which results in PFN_SECTION_SHIFT equal 6. - * To sum it up, at least 6 bits are available on all architectures. - * However, we can exceed 6 bits on some other architectures except - * powerpc (e.g. 15 bits are available on x86_64, 13 bits are available - * with the worst case of 64K pages on arm64) if we make sure the - * exceeded bit is not applicable to powerpc. + * We use the lower bits of the mem_map pointer to store a little bit of + * information. The pointer is calculated as mem_map - section_nr_to_pfn(). + * The result is aligned to the minimum alignment of the two values: + * + * 1. All mem_map arrays are page-aligned. + * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT lowest bits. + * + * We always expect a single section to cover full pages. Therefore, + * we can safely assume that PFN_SECTION_SHIFT is large enough to + * accommodate SECTION_MAP_LAST_BIT. We use BUILD_BUG_ON() to ensure this. 
*/ enum { SECTION_MARKED_PRESENT_BIT, diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index cf3374580f74..5d75cc5b057e 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -226,6 +226,7 @@ struct mr_table_ops { /** * struct mr_table - a multicast routing table + * @work: used for table destruction * @list: entry within a list of multicast routing tables * @net: net where this table belongs * @ops: protocol specific operations @@ -243,6 +244,7 @@ struct mr_table_ops { * @mroute_reg_vif_num: PIM-device vif index */ struct mr_table { + struct rcu_work work; struct list_head list; possible_net_t net; struct mr_table_ops ops; @@ -274,6 +276,7 @@ void vif_device_init(struct vif_device *v, unsigned short flags, unsigned short get_iflink_mask); +void mr_table_free(struct mr_table *mrt); struct mr_table * mr_table_alloc(struct net *net, u32 id, struct mr_table_ops *ops, diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h index d6f653e07426..f8d4d6ac1fc1 100644 --- a/include/linux/mtd/concat.h +++ b/include/linux/mtd/concat.h @@ -9,6 +9,18 @@ #define MTD_CONCAT_H +/* + * Our storage structure: + * Subdev points to an array of pointers to struct mtd_info objects + * which is allocated along with this structure + * + */ +struct mtd_concat { + struct mtd_info mtd; + int num_subdev; + struct mtd_info *subdev[]; +}; + struct mtd_info *mtd_concat_create( struct mtd_info *subdev[], /* subdevices to concatenate */ int num_devs, /* number of subdevices */ @@ -16,5 +28,54 @@ struct mtd_info *mtd_concat_create( void mtd_concat_destroy(struct mtd_info *mtd); -#endif +/** + * mtd_virt_concat_node_create - Create a component for concatenation + * + * Returns a positive number representing the no. of devices found for + * concatenation, or a negative error code. + * + * List all the devices for concatenations found in DT and create a + * component for concatenation. 
+ */
+int mtd_virt_concat_node_create(void);
+
+/**
+ * mtd_virt_concat_add - add mtd_info object to the list of subdevices for concatenation
+ * @mtd: pointer to new MTD device info structure
+ *
+ * Returns true if the mtd_info object is added successfully else returns false.
+ *
+ * The mtd_info object is added to the list of subdevices for concatenation.
+ * It returns true if a match is found, and false if all subdevices have
+ * already been added or if the mtd_info object does not match any of the
+ * intended MTD devices.
+ */
+bool mtd_virt_concat_add(struct mtd_info *mtd);
+/**
+ * mtd_virt_concat_create_join - Create and register the concatenated MTD device
+ *
+ * Returns 0 on success, or a negative error code.
+ *
+ * Creates and registers the concatenated MTD device
+ */
+int mtd_virt_concat_create_join(void);
+
+/**
+ * mtd_virt_concat_destroy - Remove the concat that includes a specific mtd device
+ * as one of its components.
+ * @mtd: pointer to MTD device info structure.
+ *
+ * Returns 0 on success, or a negative error code.
+ *
+ * If the mtd_info object is part of a concatenated device, all other MTD devices
+ * within that concat are registered individually. The concatenated device is then
+ * removed, along with its concatenation component. 
+ * + */ +int mtd_virt_concat_destroy(struct mtd_info *mtd); + +void mtd_virt_concat_destroy_joins(void); +void mtd_virt_concat_destroy_items(void); + +#endif diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 6a024cf1c53a..782984ba3a20 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -290,6 +290,12 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) +#define SPINAND_PAGE_READ_PACKED_8D_8D_0_OP(addr) \ + SPI_MEM_OP(SPI_MEM_DTR_OP_PACKED_CMD(0x13, addr >> 16, 8), \ + SPI_MEM_DTR_OP_ADDR(2, addr & 0xffff, 8), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + #define SPINAND_PAGE_READ_FROM_CACHE_8D_8D_8D_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x9d, 8), \ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \ @@ -477,11 +483,13 @@ struct spinand_ecc_info { const struct mtd_ooblayout_ops *ooblayout; }; -#define SPINAND_HAS_QE_BIT BIT(0) -#define SPINAND_HAS_CR_FEAT_BIT BIT(1) +/* SPI NAND flags */ +#define SPINAND_HAS_QE_BIT BIT(0) +#define SPINAND_HAS_CR_FEAT_BIT BIT(1) #define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2) #define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3) #define SPINAND_NO_RAW_ACCESS BIT(4) +#define SPINAND_ODTR_PACKED_PAGE_READ BIT(5) /** * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7969fcdd5ac4..0e1e581efc5a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -162,7 +162,7 @@ static inline bool dev_xmit_complete(int rc) #if defined(CONFIG_HYPERV_NET) # define LL_MAX_HEADER 128 -#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) +#elif defined(CONFIG_WLAN) # if defined(CONFIG_MAC80211_MESH) # define LL_MAX_HEADER 128 # else @@ -1119,6 +1119,16 @@ struct netdev_net_notifier { * This function is called device changes address list filtering. * If driver handles unicast address filtering, it should set * IFF_UNICAST_FLT in its priv_flags. 
+ * Cannot sleep, called with netif_addr_lock_bh held. + * Deprecated in favor of ndo_set_rx_mode_async. + * + * void (*ndo_set_rx_mode_async)(struct net_device *dev, + * struct netdev_hw_addr_list *uc, + * struct netdev_hw_addr_list *mc); + * Async version of ndo_set_rx_mode which runs in process context + * with rtnl_lock and netdev_lock_ops(dev) held. The uc/mc parameters + * are snapshots of the address lists - iterate with + * netdev_hw_addr_list_for_each(ha, uc). * * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); * This function is called when the Media Access Control address @@ -1439,6 +1449,10 @@ struct net_device_ops { void (*ndo_change_rx_flags)(struct net_device *dev, int flags); void (*ndo_set_rx_mode)(struct net_device *dev); + void (*ndo_set_rx_mode_async)( + struct net_device *dev, + struct netdev_hw_addr_list *uc, + struct netdev_hw_addr_list *mc); int (*ndo_set_mac_address)(struct net_device *dev, void *addr); int (*ndo_validate_addr)(struct net_device *dev); @@ -1903,6 +1917,9 @@ enum netdev_reg_state { * has been enabled due to the need to listen to * additional unicast addresses in a device that * does not implement ndo_set_rx_mode() + * @rx_mode_node: List entry for rx_mode work processing + * @rx_mode_tracker: Refcount tracker for rx_mode work + * @rx_mode_addr_cache: Recycled snapshot entries for rx_mode work * @uc: unicast mac addresses * @mc: multicast mac addresses * @dev_addrs: list of device hw addresses @@ -2294,6 +2311,9 @@ struct net_device { unsigned int promiscuity; unsigned int allmulti; bool uc_promisc; + struct list_head rx_mode_node; + netdevice_tracker rx_mode_tracker; + struct netdev_hw_addr_list rx_mode_addr_cache; #ifdef CONFIG_LOCKDEP unsigned char nested_level; #endif @@ -2316,9 +2336,6 @@ struct net_device { #if IS_ENABLED(CONFIG_ATALK) void *atalk_ptr; #endif -#if IS_ENABLED(CONFIG_AX25) - struct ax25_dev __rcu *ax25_ptr; -#endif #if IS_ENABLED(CONFIG_CFG80211) struct wireless_dev *ieee80211_ptr; #endif 
@@ -5004,6 +5021,14 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, int (*unsync)(struct net_device *, const unsigned char *)); void __hw_addr_init(struct netdev_hw_addr_list *list); +void __hw_addr_flush(struct netdev_hw_addr_list *list); +int __hw_addr_list_snapshot(struct netdev_hw_addr_list *snap, + const struct netdev_hw_addr_list *list, + int addr_len, struct netdev_hw_addr_list *cache); +void __hw_addr_list_reconcile(struct netdev_hw_addr_list *real_list, + struct netdev_hw_addr_list *work, + struct netdev_hw_addr_list *ref, int addr_len, + struct netdev_hw_addr_list *cache); /* Functions used for device addresses handling */ void dev_addr_mod(struct net_device *dev, unsigned int offset, diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 77c778d84d4c..5a1c5c336fa4 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -146,6 +146,9 @@ struct xt_match { /* Called when user tries to insert an entry of this type. */ int (*checkentry)(const struct xt_mtchk_param *); + /* Called to validate hooks based on the match configuration. */ + int (*check_hooks)(const struct xt_mtchk_param *); + /* Called when entry of this type deleted. */ void (*destroy)(const struct xt_mtdtor_param *); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT @@ -187,6 +190,9 @@ struct xt_target { /* Should return 0 on success or an error code otherwise (-Exxxx). */ int (*checkentry)(const struct xt_tgchk_param *); + /* Called to validate hooks based on the target configuration. */ + int (*check_hooks)(const struct xt_tgchk_param *); + /* Called when entry of this type deleted. 
*/ void (*destroy)(const struct xt_tgdtor_param *); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT @@ -279,8 +285,10 @@ bool xt_find_jump_offset(const unsigned int *offsets, int xt_check_proc_name(const char *name, unsigned int size); +int xt_check_hooks_match(struct xt_mtchk_param *par); int xt_check_match(struct xt_mtchk_param *, unsigned int size, u16 proto, bool inv_proto); +int xt_check_hooks_target(struct xt_tgchk_param *par); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u16 proto, bool inv_proto); @@ -297,9 +305,11 @@ struct xt_counters *xt_counters_alloc(unsigned int counters); struct xt_table *xt_register_table(struct net *net, const struct xt_table *table, + const struct nf_hook_ops *template_ops, struct xt_table_info *bootstrap, struct xt_table_info *newinfo); -void *xt_unregister_table(struct xt_table *table); +void xt_unregister_table_pre_exit(struct net *net, u8 af, const char *name); +struct xt_table *xt_unregister_table_exit(struct net *net, u8 af, const char *name); struct xt_table_info *xt_replace_table(struct xt_table *table, unsigned int num_counters, diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index a40aaf645fa4..05631a25e622 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -53,7 +53,6 @@ int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, const struct nf_hook_ops *ops); void arpt_unregister_table(struct net *net, const char *name); -void arpt_unregister_table_pre_exit(struct net *net, const char *name); extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 132b0e4a6d4d..13593391d605 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -26,7 +26,6 @@ int 
ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops); -void ipt_unregister_table_pre_exit(struct net *net, const char *name); void ipt_unregister_table_exit(struct net *net, const char *name); /* Standard entry. */ diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 8b8885a73c76..c6d5b927830d 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -27,7 +27,6 @@ extern void *ip6t_alloc_initial_table(const struct xt_table *); int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, const struct nf_hook_ops *ops); -void ip6t_unregister_table_pre_exit(struct net *net, const char *name); void ip6t_unregister_table_exit(struct net *net, const char *name); extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 8dd79a3f3d66..4623262da3c0 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -109,6 +109,7 @@ struct nfs_open_context { #define NFS_CONTEXT_BAD (2) #define NFS_CONTEXT_UNLOCK (3) #define NFS_CONTEXT_FILE_OPEN (4) +#define NFS_CONTEXT_WRITE_SYNC (5) struct nfs4_threshold *mdsthreshold; struct list_head list; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index ff1f12aa73d2..fcbd21b5685f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1611,12 +1611,15 @@ struct nfs42_listxattrsres { struct nfs42_removexattrargs { struct nfs4_sequence_args seq_args; struct nfs_fh *fh; + const u32 *bitmask; const char *xattr_name; }; struct nfs42_removexattrres { struct nfs4_sequence_res seq_res; struct nfs4_change_info cinfo; + struct nfs_fattr *fattr; + const struct nfs_server *server; }; #endif /* CONFIG_NFS_V4_2 */ diff --git a/include/linux/nstree.h b/include/linux/nstree.h index 
175e4625bfa6..5b64d4572881 100644 --- a/include/linux/nstree.h +++ b/include/linux/nstree.h @@ -61,7 +61,7 @@ static inline void __ns_tree_add(struct ns_common *ns, struct ns_tree_root *ns_t /** * ns_tree_add_raw - Add a namespace to a namespace - * @ns: Namespace to add + * @__ns: Namespace to add * * This function adds a namespace to the appropriate namespace tree * without assigning a id. @@ -70,7 +70,7 @@ static inline void __ns_tree_add(struct ns_common *ns, struct ns_tree_root *ns_t /** * ns_tree_add - Add a namespace to a namespace tree - * @ns: Namespace to add + * @__ns: Namespace to add * * This function assigns a new id to the namespace and adds it to the * appropriate namespace tree and list. @@ -81,7 +81,7 @@ static inline void __ns_tree_add(struct ns_common *ns, struct ns_tree_root *ns_t /** * ns_tree_remove - Remove a namespace from a namespace tree - * @ns: Namespace to remove + * @__ns: Namespace to remove * * This function removes a namespace from the appropriate namespace * tree and list. 
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h index 682f81046345..d674d8ab26e6 100644 --- a/include/linux/nvme-auth.h +++ b/include/linux/nvme-auth.h @@ -49,9 +49,9 @@ int nvme_auth_augmented_challenge(u8 hmac_id, const u8 *skey, size_t skey_len, int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid); int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm, u8 *host_key, size_t host_key_len); -int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, - const u8 *ctrl_key, size_t ctrl_key_len, - u8 *sess_key, size_t sess_key_len); +int nvme_auth_gen_session_key(struct crypto_kpp *dh_tfm, + const u8 *public_key, size_t public_key_len, + u8 *sess_key, size_t sess_key_len, u8 hash_id); int nvme_auth_generate_psk(u8 hmac_id, const u8 *skey, size_t skey_len, const u8 *c1, const u8 *c2, size_t hash_len, u8 **ret_psk, size_t *ret_len); diff --git a/include/linux/of.h b/include/linux/of.h index 2b95777f16f6..959786f8f196 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -410,7 +410,7 @@ extern int of_alias_get_id(const struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); bool of_machine_compatible_match(const char *const *compats); -bool of_machine_device_match(const struct of_device_id *matches); +const struct of_device_id *of_machine_get_match(const struct of_device_id *matches); const void *of_machine_get_match_data(const struct of_device_id *matches); /** @@ -880,9 +880,9 @@ static inline bool of_machine_compatible_match(const char *const *compats) return false; } -static inline bool of_machine_device_match(const struct of_device_id *matches) +static inline const struct of_device_id *of_machine_get_match(const struct of_device_id *matches) { - return false; + return NULL; } static inline const void * @@ -990,6 +990,11 @@ static inline int of_numa_init(void) } #endif +static inline bool of_machine_device_match(const struct of_device_id *matches) +{ + return of_machine_get_match(matches) != 
NULL; +} + static inline struct device_node *of_find_matching_node( struct device_node *from, const struct of_device_id *matches) diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 38a82d65e58e..951d33362268 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -181,7 +181,7 @@ static inline struct alloc_tag *__pgalloc_tag_get(struct page *page) if (get_page_tag_ref(page, &ref, &handle)) { alloc_tag_sub_check(&ref); - if (ref.ct) + if (ref.ct && !is_codetag_empty(&ref)) tag = ct_to_alloc_tag(ref.ct); put_page_tag_ref(handle); } diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 89277808ea61..a5d4b2d8633a 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -83,6 +83,8 @@ struct pinctrl_map; * schmitt-trigger mode is disabled. * @PIN_CONFIG_INPUT_SCHMITT_UV: this will configure an input pin to run in * schmitt-trigger mode. The argument is in uV. + * @PIN_CONFIG_INPUT_VOLTAGE_UV: this will configure the input voltage level of + * the pin. The argument is specified in microvolts. * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 @@ -115,7 +117,7 @@ struct pinctrl_map; * @PIN_CONFIG_SKEW_DELAY_INPUT_PS: if the pin has independent values for the * programmable skew rate (on inputs) and latch delay (on outputs), then * this parameter specifies the clock skew only. The argument is in ps. - * @PIN_CONFIG_SKEW_DELAY_OUPUT_PS: if the pin has independent values for the + * @PIN_CONFIG_SKEW_DELAY_OUTPUT_PS: if the pin has independent values for the * programmable skew rate (on inputs) and latch delay (on outputs), then * this parameter specifies the latch delay only. The argument is in ps. * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state. 
@@ -145,6 +147,7 @@ enum pin_config_param { PIN_CONFIG_INPUT_SCHMITT, PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_INPUT_SCHMITT_UV, + PIN_CONFIG_INPUT_VOLTAGE_UV, PIN_CONFIG_MODE_LOW_POWER, PIN_CONFIG_MODE_PWM, PIN_CONFIG_LEVEL, diff --git a/include/linux/platform_data/apds990x.h b/include/linux/platform_data/apds990x.h index 94dfbaa365e1..37684f68c04f 100644 --- a/include/linux/platform_data/apds990x.h +++ b/include/linux/platform_data/apds990x.h @@ -31,7 +31,6 @@ * itself. If the GA is zero, driver will use uncovered sensor default values * format: decimal value * APDS_PARAM_SCALE except df which is plain integer. */ -#define APDS_PARAM_SCALE 4096 struct apds990x_chip_factors { int ga; int cf1; @@ -40,11 +39,12 @@ struct apds990x_chip_factors { int irf2; int df; }; +#define APDS_PARAM_SCALE 4096 /** * struct apds990x_platform_data - platform data for apsd990x.c driver * @cf: chip factor data - * @pddrive: IR-led driving current + * @pdrive: IR-led driving current * @ppcount: number of IR pulses used for proximity estimation * @setup_resources: interrupt line setup call back function * @release_resources: interrupt line release call back function diff --git a/include/linux/platform_data/tsl2772.h b/include/linux/platform_data/tsl2772.h index f8ade15a35e2..f042e82b39c3 100644 --- a/include/linux/platform_data/tsl2772.h +++ b/include/linux/platform_data/tsl2772.h @@ -61,9 +61,9 @@ struct tsl2772_lux { * @prox_pulse_count: Number if proximity emitter pulses. * @prox_max_samples_cal: The number of samples that are taken when performing * a proximity calibration. - * @prox_diode Which diode(s) to use for driving the external + * @prox_diode: Which diode(s) to use for driving the external * LED(s) for proximity sensing. - * @prox_power The amount of power to use for the external LED(s). + * @prox_power: The amount of power to use for the external LED(s). 
*/ struct tsl2772_settings { int als_time; diff --git a/include/linux/platform_data/x86/int3472.h b/include/linux/platform_data/x86/int3472.h index dbe745dc88d5..93f1e1fe09b4 100644 --- a/include/linux/platform_data/x86/int3472.h +++ b/include/linux/platform_data/x86/int3472.h @@ -23,6 +23,7 @@ /* PMIC GPIO Types */ #define INT3472_GPIO_TYPE_RESET 0x00 #define INT3472_GPIO_TYPE_POWERDOWN 0x01 +#define INT3472_GPIO_TYPE_STROBE 0x02 #define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b #define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c #define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d @@ -32,6 +33,7 @@ #define INT3472_PDEV_MAX_NAME_LEN 23 #define INT3472_MAX_SENSOR_GPIOS 3 +#define INT3472_MAX_LEDS 2 #define INT3472_MAX_REGULATORS 3 /* E.g. "dovdd\0" */ @@ -122,16 +124,17 @@ struct int3472_discrete_device { u8 imgclk_index; } clock; - struct int3472_pled { + struct int3472_led { struct led_classdev classdev; struct led_lookup_data lookup; char name[INT3472_LED_MAX_NAME_LEN]; struct gpio_desc *gpio; - } pled; + } leds[INT3472_MAX_LEDS]; struct int3472_discrete_quirks quirks; unsigned int ngpios; /* how many GPIOs have we seen */ + unsigned int n_leds; /* how many LEDs have we registered */ unsigned int n_sensor_gpios; /* how many have we mapped to sensor */ unsigned int n_regulator_gpios; /* how many have we mapped to a regulator */ struct gpiod_lookup_table gpios; @@ -161,7 +164,8 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472, const char *second_sensor); void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472); -int skl_int3472_register_pled(struct int3472_discrete_device *int3472, struct gpio_desc *gpio); -void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472); +int skl_int3472_register_led(struct int3472_discrete_device *int3472, struct gpio_desc *gpio, + const char *con_id); +void skl_int3472_unregister_leds(struct int3472_discrete_device *int3472); #endif diff --git a/include/linux/power/max17042_battery.h 
b/include/linux/power/max17042_battery.h index c417abd2ab70..d5b08313cf11 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -17,6 +17,7 @@ #define MAX17042_DEFAULT_VMAX (4500) /* LiHV cell max */ #define MAX17042_DEFAULT_TEMP_MIN (0) /* For sys without temp sensor */ #define MAX17042_DEFAULT_TEMP_MAX (700) /* 70 degrees Celcius */ +#define MAX17042_DEFAULT_TASK_PERIOD (5760) /* Consider RepCap which is less then 10 units below FullCAP full */ #define MAX17042_FULL_THRESHOLD 10 @@ -105,7 +106,7 @@ enum max17042_register { MAX17042_OCV = 0xEE, - MAX17042_OCVInternal = 0xFB, /* MAX17055 VFOCV */ + MAX17042_OCVInternal = 0xFB, /* MAX17055/77759 VFOCV */ MAX17042_VFSOC = 0xFF, }; @@ -156,7 +157,7 @@ enum max17055_register { MAX17055_AtAvCap = 0xDF, }; -/* Registers specific to max17047/50/55 */ +/* Registers specific to max17047/50/55/77759 */ enum max17047_register { MAX17047_QRTbl00 = 0x12, MAX17047_FullSOCThr = 0x13, @@ -167,12 +168,32 @@ enum max17047_register { MAX17047_QRTbl30 = 0x42, }; +enum max77759_register { + MAX77759_AvgTA0 = 0x26, + MAX77759_AtTTF = 0x33, + MAX77759_Tconvert = 0x34, + MAX77759_AvgCurrent0 = 0x3B, + MAX77759_THMHOT = 0x40, + MAX77759_CTESample = 0x41, + MAX77759_ISys = 0x43, + MAX77759_AvgVCell0 = 0x44, + MAX77759_RlxSOC = 0x47, + MAX77759_AvgISys = 0x4B, + MAX77759_QH0 = 0x4C, + MAX77759_MixAtFull = 0x4F, + MAX77759_VSys = 0xB1, + MAX77759_TAlrtTh2 = 0xB2, + MAX77759_VByp = 0xB3, + MAX77759_IIn = 0xD0, +}; + enum max170xx_chip_type { MAXIM_DEVICE_TYPE_UNKNOWN = 0, MAXIM_DEVICE_TYPE_MAX17042, MAXIM_DEVICE_TYPE_MAX17047, MAXIM_DEVICE_TYPE_MAX17050, MAXIM_DEVICE_TYPE_MAX17055, + MAXIM_DEVICE_TYPE_MAX77759, MAXIM_DEVICE_TYPE_NUM }; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 360ffdf272da..7a5e4c3242a0 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -210,6 +210,9 @@ enum power_supply_usb_type { POWER_SUPPLY_USB_TYPE_PD, /* Power 
Delivery Port */ POWER_SUPPLY_USB_TYPE_PD_DRP, /* PD Dual Role Port */ POWER_SUPPLY_USB_TYPE_PD_PPS, /* PD Programmable Power Supply */ + /* PD Standard Power Range Adjustable Voltage Supply */ + POWER_SUPPLY_USB_TYPE_PD_SPR_AVS, + POWER_SUPPLY_USB_TYPE_PD_PPS_SPR_AVS, /* Supports both PD PPS + SPR AVS */ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */ }; diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h index b7e57fdbd413..b1d1f46d7d3b 100644 --- a/include/linux/ppp_defs.h +++ b/include/linux/ppp_defs.h @@ -8,6 +8,7 @@ #define _PPP_DEFS_H_ #include <linux/crc-ccitt.h> +#include <linux/skbuff.h> #include <uapi/linux/ppp_defs.h> #define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c) @@ -25,4 +26,19 @@ static inline bool ppp_proto_is_valid(u16 proto) return !!((proto & 0x0101) == 0x0001); } +/** + * ppp_skb_is_compressed_proto - checks if PPP protocol in a skb is compressed + * @skb: skb to check + * + * Check if the PPP protocol field is compressed (the least significant + * bit of the most significant octet is 1). skb->data must point to the PPP + * protocol header. + * + * Return: Whether the PPP protocol field is compressed. 
+ */ +static inline bool ppp_skb_is_compressed_proto(const struct sk_buff *skb) +{ + return unlikely(skb->data[0] & 0x01); +} + #endif /* _PPP_DEFS_H_ */ diff --git a/include/linux/printk.h b/include/linux/printk.h index 54e3c621fec3..f594c1266bfd 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -815,7 +815,8 @@ static inline void print_hex_dump_devel(const char *prefix_str, int prefix_type, #endif /** - * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params + * print_hex_dump_bytes - shorthand form of print_hex_dump_debug() with default + * params * @prefix_str: string to prefix each line with; * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none @@ -823,7 +824,7 @@ static inline void print_hex_dump_devel(const char *prefix_str, int prefix_type, * @buf: data blob to dump * @len: number of bytes in the @buf * - * Calls print_hex_dump(), with log level of KERN_DEBUG, + * Calls print_hex_dump_debug(), with log level of KERN_DEBUG, * rowsize of 16, groupsize of 1, and ASCII output included. */ #define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 2abba7552605..e3bc44225692 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -261,6 +261,35 @@ static inline void list_replace_rcu(struct list_head *old, old->prev = LIST_POISON2; } +static inline void __list_splice_rcu(struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + last->next = next; + first->prev = prev; + next->prev = last; + rcu_assign_pointer(list_next_rcu(prev), first); +} + +/** + * list_splice_rcu - splice a non-RCU list into an RCU-protected list, + * designed for stacks. 
+ * @list: the non RCU-protected list to splice + * @head: the place in the existing RCU-protected list to splice + * + * The list pointed to by @head can be RCU-read traversed concurrently with + * this function. + */ +static inline void list_splice_rcu(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice_rcu(list, head, head->next); +} + /** * __list_splice_init_rcu - join an RCU-protected list into an existing list. * @list: the RCU-protected list to splice diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h index 344ff41c22c7..4070537d6542 100644 --- a/include/linux/remoteproc/mtk_scp.h +++ b/include/linux/remoteproc/mtk_scp.h @@ -58,7 +58,7 @@ int scp_ipi_register(struct mtk_scp *scp, u32 id, scp_ipi_handler_t handler, void *priv); void scp_ipi_unregister(struct mtk_scp *scp, u32 id); -int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len, +int scp_ipi_send(struct mtk_scp *scp, u32 id, const void *buf, unsigned int len, unsigned int wait); unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp); diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h index 015c8298bebc..fc2f596a6df1 100644 --- a/include/linux/rhashtable-types.h +++ b/include/linux/rhashtable-types.h @@ -12,6 +12,7 @@ #include <linux/alloc_tag.h> #include <linux/atomic.h> #include <linux/compiler.h> +#include <linux/irq_work_types.h> #include <linux/mutex.h> #include <linux/workqueue_types.h> @@ -49,6 +50,7 @@ typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, * @head_offset: Offset of rhash_head in struct to be hashed * @max_size: Maximum size while expanding * @min_size: Minimum size while shrinking + * @insecure_elasticity: Set to true to disable chain length checks * @automatic_shrinking: Enable automatic shrinking of tables * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) * @obj_hashfn: Function to hash object @@ -61,6 +63,7 @@ struct 
rhashtable_params { u16 head_offset; unsigned int max_size; u16 min_size; + bool insecure_elasticity; bool automatic_shrinking; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; @@ -75,6 +78,7 @@ struct rhashtable_params { * @p: Configuration parameters * @rhlist: True if this is an rhltable * @run_work: Deferred worker to expand/shrink asynchronously + * @run_irq_work: Bounces the @run_work kick through hard IRQ context. * @mutex: Mutex to protect current/future table swapping * @lock: Spin lock to protect walker list * @nelems: Number of elements in table @@ -86,6 +90,7 @@ struct rhashtable { struct rhashtable_params p; bool rhlist; struct work_struct run_work; + struct irq_work run_irq_work; struct mutex mutex; spinlock_t lock; atomic_t nelems; diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 0480509a6339..ef5230cece36 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -20,6 +20,7 @@ #include <linux/err.h> #include <linux/errno.h> +#include <linux/irq_work.h> #include <linux/jhash.h> #include <linux/list_nulls.h> #include <linux/workqueue.h> @@ -821,14 +822,15 @@ slow_path: goto out; } - if (elasticity <= 0) + if (elasticity <= 0 && !params.insecure_elasticity) goto slow_path; data = ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_max(ht, tbl))) goto out_unlock; - if (unlikely(rht_grow_above_100(ht, tbl))) + if (unlikely(rht_grow_above_100(ht, tbl)) && + !params.insecure_elasticity) goto slow_path; /* Inserting at head of list makes unlocking free. 
*/ @@ -846,7 +848,7 @@ slow_path: rht_assign_unlock(tbl, bkt, obj, flags); if (rht_grow_above_75(ht, tbl)) - schedule_work(&ht->run_work); + irq_work_queue(&ht->run_irq_work); data = NULL; out: diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index fb7ab9165645..83266ce14642 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -182,11 +182,11 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, rpmsg_rx_cb_t cb, void *priv, struct rpmsg_channel_info chinfo); -int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_send(struct rpmsg_endpoint *ept, const void *data, int len); +int rpmsg_sendto(struct rpmsg_endpoint *ept, const void *data, int len, u32 dst); -int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_trysend(struct rpmsg_endpoint *ept, const void *data, int len); +int rpmsg_trysendto(struct rpmsg_endpoint *ept, const void *data, int len, u32 dst); __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait); @@ -249,7 +249,7 @@ static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev return NULL; } -static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) +static inline int rpmsg_send(struct rpmsg_endpoint *ept, const void *data, int len) { /* This shouldn't be possible */ WARN_ON(1); @@ -257,7 +257,7 @@ static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) return -ENXIO; } -static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, +static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, const void *data, int len, u32 dst) { /* This shouldn't be possible */ @@ -267,7 +267,8 @@ static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, } -static inline int rpmsg_trysend(struct 
rpmsg_endpoint *ept, void *data, int len) +static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, const void *data, + int len) { /* This shouldn't be possible */ WARN_ON(1); @@ -275,7 +276,7 @@ static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) return -ENXIO; } -static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, +static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, const void *data, int len, u32 dst) { /* This shouldn't be possible */ diff --git a/include/linux/rpmsg/mtk_rpmsg.h b/include/linux/rpmsg/mtk_rpmsg.h index 363b60178040..badcbc89917f 100644 --- a/include/linux/rpmsg/mtk_rpmsg.h +++ b/include/linux/rpmsg/mtk_rpmsg.h @@ -25,7 +25,7 @@ struct mtk_rpmsg_info { ipi_handler_t handler, void *priv); void (*unregister_ipi)(struct platform_device *pdev, u32 id); int (*send_ipi)(struct platform_device *pdev, u32 id, - void *buf, unsigned int len, unsigned int wait); + const void *buf, unsigned int len, unsigned int wait); int ns_ipi_id; }; diff --git a/include/linux/rseq.h b/include/linux/rseq.h index b9d62fc2140d..7ef79b25e714 100644 --- a/include/linux/rseq.h +++ b/include/linux/rseq.h @@ -9,6 +9,11 @@ void __rseq_handle_slowpath(struct pt_regs *regs); +static __always_inline bool rseq_v2(struct task_struct *t) +{ + return IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY) && likely(t->rseq.event.has_rseq > 1); +} + /* Invoked from resume_user_mode_work() */ static inline void rseq_handle_slowpath(struct pt_regs *regs) { @@ -16,8 +21,7 @@ static inline void rseq_handle_slowpath(struct pt_regs *regs) if (current->rseq.event.slowpath) __rseq_handle_slowpath(regs); } else { - /* '&' is intentional to spare one conditional branch */ - if (current->rseq.event.sched_switch & current->rseq.event.has_rseq) + if (current->rseq.event.sched_switch && current->rseq.event.has_rseq) __rseq_handle_slowpath(regs); } } @@ -30,9 +34,9 @@ void __rseq_signal_deliver(int sig, struct pt_regs *regs); */ static inline void 
rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { - if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) { - /* '&' is intentional to spare one conditional branch */ - if (current->rseq.event.has_rseq & current->rseq.event.user_irq) + if (rseq_v2(current)) { + /* has_rseq is implied in rseq_v2() */ + if (current->rseq.event.user_irq) __rseq_signal_deliver(ksig->sig, regs); } else { if (current->rseq.event.has_rseq) @@ -50,15 +54,22 @@ static __always_inline void rseq_sched_switch_event(struct task_struct *t) { struct rseq_event *ev = &t->rseq.event; - if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) { + /* + * Only apply the user_irq optimization for RSEQ ABI V2 registrations. + * Legacy users like TCMalloc rely on the original ABI V1 behaviour + * which updates IDs on every context switch. + */ + if (rseq_v2(t)) { /* - * Avoid a boat load of conditionals by using simple logic - * to determine whether NOTIFY_RESUME needs to be raised. + * Avoid a boat load of conditionals by using simple logic to + * determine whether TIF_NOTIFY_RESUME or TIF_RSEQ needs to be + * raised. + * - * It's required when the CPU or MM CID has changed or - * the entry was from user space. + * It's required when the CPU or MM CID has changed or the entry + * was via interrupt from user space. ev->has_rseq does not have + * to be evaluated here because rseq_v2() implies has_rseq. 
*/ - bool raise = (ev->user_irq | ev->ids_changed) & ev->has_rseq; + bool raise = ev->user_irq | ev->ids_changed; if (raise) { ev->sched_switch = true; @@ -66,6 +77,7 @@ static __always_inline void rseq_sched_switch_event(struct task_struct *t) } } else { if (ev->has_rseq) { + t->rseq.event.ids_changed = true; t->rseq.event.sched_switch = true; rseq_raise_notify_resume(t); } @@ -119,6 +131,8 @@ static inline void rseq_virt_userspace_exit(void) static inline void rseq_reset(struct task_struct *t) { + /* Protect against preemption and membarrier IPI */ + guard(irqsave)(); memset(&t->rseq, 0, sizeof(t->rseq)); t->rseq.ids.cpu_id = RSEQ_CPU_ID_UNINITIALIZED; } @@ -159,6 +173,7 @@ static inline unsigned int rseq_alloc_align(void) } #else /* CONFIG_RSEQ */ +static inline bool rseq_v2(struct task_struct *t) { return false; } static inline void rseq_handle_slowpath(struct pt_regs *regs) { } static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { } static inline void rseq_sched_switch_event(struct task_struct *t) { } diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h index f11ebd34f8b9..2d0295df5107 100644 --- a/include/linux/rseq_entry.h +++ b/include/linux/rseq_entry.h @@ -111,6 +111,20 @@ static __always_inline void rseq_slice_clear_grant(struct task_struct *t) t->rseq.slice.state.granted = false; } +/* + * Open coded, so it can be invoked within a user access region. + * + * This clears the user space state of the time slice extensions field only when + * the task has registered the optimized RSEQ_ABI V2. Some legacy registrations, + * e.g. TCMalloc, have conflicting non-ABI fields in struct RSEQ, which would be + * overwritten by an unconditional write. 
+ */ +#define rseq_slice_clear_user(rseq, efault) \ +do { \ + if (rseq_slice_extension_enabled()) \ + unsafe_put_user(0U, &rseq->slice_ctrl.all, efault); \ +} while (0) + static __always_inline bool __rseq_grant_slice_extension(bool work_pending) { struct task_struct *curr = current; @@ -230,10 +244,10 @@ static __always_inline bool rseq_slice_extension_enabled(void) { return false; } static __always_inline bool rseq_arm_slice_extension_timer(void) { return false; } static __always_inline void rseq_slice_clear_grant(struct task_struct *t) { } static __always_inline bool rseq_grant_slice_extension(unsigned long ti_work, unsigned long mask) { return false; } +#define rseq_slice_clear_user(rseq, efault) do { } while (0) #endif /* !CONFIG_RSEQ_SLICE_EXTENSION */ bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr); -bool rseq_debug_validate_ids(struct task_struct *t); static __always_inline void rseq_note_user_irq_entry(void) { @@ -353,43 +367,6 @@ efault: return false; } -/* - * On debug kernels validate that user space did not mess with it if the - * debug branch is enabled. - */ -bool rseq_debug_validate_ids(struct task_struct *t) -{ - struct rseq __user *rseq = t->rseq.usrptr; - u32 cpu_id, uval, node_id; - - /* - * On the first exit after registering the rseq region CPU ID is - * RSEQ_CPU_ID_UNINITIALIZED and node_id in user space is 0! - */ - node_id = t->rseq.ids.cpu_id != RSEQ_CPU_ID_UNINITIALIZED ? 
- cpu_to_node(t->rseq.ids.cpu_id) : 0; - - scoped_user_read_access(rseq, efault) { - unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault); - if (cpu_id != t->rseq.ids.cpu_id) - goto die; - unsafe_get_user(uval, &rseq->cpu_id, efault); - if (uval != cpu_id) - goto die; - unsafe_get_user(uval, &rseq->node_id, efault); - if (uval != node_id) - goto die; - unsafe_get_user(uval, &rseq->mm_cid, efault); - if (uval != t->rseq.ids.mm_cid) - goto die; - } - return true; -die: - t->rseq.event.fatal = true; -efault: - return false; -} - #endif /* RSEQ_BUILD_SLOW_PATH */ /* @@ -499,37 +476,50 @@ efault: * faults in task context are fatal too. */ static rseq_inline -bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids, - u32 node_id, u64 *csaddr) +bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids, u64 *csaddr) { struct rseq __user *rseq = t->rseq.usrptr; - if (static_branch_unlikely(&rseq_debug_enabled)) { - if (!rseq_debug_validate_ids(t)) - return false; - } - scoped_user_rw_access(rseq, efault) { + /* Validate the R/O fields for debug and optimized mode */ + if (static_branch_unlikely(&rseq_debug_enabled) || rseq_v2(t)) { + u32 cpu_id, uval; + + unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault); + if (cpu_id != t->rseq.ids.cpu_id) + goto die; + unsafe_get_user(uval, &rseq->cpu_id, efault); + if (uval != cpu_id) + goto die; + unsafe_get_user(uval, &rseq->node_id, efault); + if (uval != t->rseq.ids.node_id) + goto die; + unsafe_get_user(uval, &rseq->mm_cid, efault); + if (uval != t->rseq.ids.mm_cid) + goto die; + } + unsafe_put_user(ids->cpu_id, &rseq->cpu_id_start, efault); unsafe_put_user(ids->cpu_id, &rseq->cpu_id, efault); - unsafe_put_user(node_id, &rseq->node_id, efault); + unsafe_put_user(ids->node_id, &rseq->node_id, efault); unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault); if (csaddr) unsafe_get_user(*csaddr, &rseq->rseq_cs, efault); - /* Open coded, so it's in the same user access region */ - if 
(rseq_slice_extension_enabled()) { - /* Unconditionally clear it, no point in conditionals */ - unsafe_put_user(0U, &rseq->slice_ctrl.all, efault); - } + /* RSEQ ABI V2 only operations */ + if (rseq_v2(t)) + rseq_slice_clear_user(rseq, efault); } rseq_slice_clear_grant(t); /* Cache the new values */ - t->rseq.ids.cpu_cid = ids->cpu_cid; + t->rseq.ids = *ids; rseq_stat_inc(rseq_stats.ids); rseq_trace_update(t, ids); return true; + +die: + t->rseq.event.fatal = true; efault: return false; } @@ -539,11 +529,11 @@ efault: * is in a critical section. */ static rseq_inline bool rseq_update_usr(struct task_struct *t, struct pt_regs *regs, - struct rseq_ids *ids, u32 node_id) + struct rseq_ids *ids) { u64 csaddr; - if (!rseq_set_ids_get_csaddr(t, ids, node_id, &csaddr)) + if (!rseq_set_ids_get_csaddr(t, ids, &csaddr)) return false; /* @@ -612,6 +602,14 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t * interrupts disabled */ guard(pagefault)(); + /* + * This optimization is only valid when the task registered for the + * optimized RSEQ_ABI_V2 variant. Some legacy users rely on the original + * RSEQ implementation behaviour which unconditionally updated the IDs. + * rseq_sched_switch_event() ensures that legacy registrations always + * have both sched_switch and ids_changed set, which is compatible with + * the historical TIF_NOTIFY_RESUME behaviour. 
+ */ if (likely(!t->rseq.event.ids_changed)) { struct rseq __user *rseq = t->rseq.usrptr; /* @@ -623,11 +621,9 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t scoped_user_rw_access(rseq, efault) { unsafe_get_user(csaddr, &rseq->rseq_cs, efault); - /* Open coded, so it's in the same user access region */ - if (rseq_slice_extension_enabled()) { - /* Unconditionally clear it, no point in conditionals */ - unsafe_put_user(0U, &rseq->slice_ctrl.all, efault); - } + /* RSEQ ABI V2 only operations */ + if (rseq_v2(t)) + rseq_slice_clear_user(rseq, efault); } rseq_slice_clear_grant(t); @@ -640,12 +636,12 @@ static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct t } struct rseq_ids ids = { - .cpu_id = task_cpu(t), - .mm_cid = task_mm_cid(t), + .cpu_id = task_cpu(t), + .mm_cid = task_mm_cid(t), + .node_id = cpu_to_node(ids.cpu_id), }; - u32 node_id = cpu_to_node(ids.cpu_id); - return rseq_update_usr(t, regs, &ids, node_id); + return rseq_update_usr(t, regs, &ids); efault: return false; } diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h index 0b42045988db..85739a63e85e 100644 --- a/include/linux/rseq_types.h +++ b/include/linux/rseq_types.h @@ -9,6 +9,12 @@ #ifdef CONFIG_RSEQ struct rseq; +/* + * rseq_event::has_rseq contains the ABI version number so preserving it + * in AND operations requires a mask. + */ +#define RSEQ_HAS_RSEQ_VERSION_MASK 0xff + /** * struct rseq_event - Storage for rseq related event management * @all: Compound to initialize and clear the data efficiently @@ -17,7 +23,8 @@ struct rseq; * exit to user * @ids_changed: Indicator that IDs need to be updated * @user_irq: True on interrupt entry from user mode - * @has_rseq: True if the task has a rseq pointer installed + * @has_rseq: Greater than 0 if the task has a rseq pointer installed. 
+ * Contains the RSEQ version number * @error: Compound error code for the slow path to analyze * @fatal: User space data corrupted or invalid * @slowpath: Indicator that slow path processing via TIF_NOTIFY_RESUME @@ -59,8 +66,9 @@ struct rseq_event { * compiler emit a single compare on 64-bit * @cpu_id: The CPU ID which was written last to user space * @mm_cid: The MM CID which was written last to user space + * @node_id: The node ID which was written last to user space * - * @cpu_id and @mm_cid are updated when the data is written to user space. + * @cpu_id, @mm_cid and @node_id are updated when the data is written to user space. */ struct rseq_ids { union { @@ -70,6 +78,7 @@ struct rseq_ids { u32 mm_cid; }; }; + u32 node_id; }; /** diff --git a/include/linux/scc.h b/include/linux/scc.h deleted file mode 100644 index 745eabd17c10..000000000000 --- a/include/linux/scc.h +++ /dev/null @@ -1,86 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* $Id: scc.h,v 1.29 1997/04/02 14:56:45 jreuter Exp jreuter $ */ -#ifndef _SCC_H -#define _SCC_H - -#include <uapi/linux/scc.h> - - -enum {TX_OFF, TX_ON}; /* command for scc_key_trx() */ - -/* Vector masks in RR2B */ - -#define VECTOR_MASK 0x06 -#define TXINT 0x00 -#define EXINT 0x02 -#define RXINT 0x04 -#define SPINT 0x06 - -#ifdef CONFIG_SCC_DELAY -#define Inb(port) inb_p(port) -#define Outb(port, val) outb_p(val, port) -#else -#define Inb(port) inb(port) -#define Outb(port, val) outb(val, port) -#endif - -/* SCC channel control structure for KISS */ - -struct scc_kiss { - unsigned char txdelay; /* Transmit Delay 10 ms/cnt */ - unsigned char persist; /* Persistence (0-255) as a % */ - unsigned char slottime; /* Delay to wait on persistence hit */ - unsigned char tailtime; /* Delay after last byte written */ - unsigned char fulldup; /* Full Duplex mode 0=CSMA 1=DUP 2=ALWAYS KEYED */ - unsigned char waittime; /* Waittime before any transmit attempt */ - unsigned int maxkeyup; /* Maximum time to transmit (seconds) */ - unsigned 
int mintime; /* Minimal offtime after MAXKEYUP timeout (seconds) */ - unsigned int idletime; /* Maximum idle time in ALWAYS KEYED mode (seconds) */ - unsigned int maxdefer; /* Timer for CSMA channel busy limit */ - unsigned char tx_inhibit; /* Transmit is not allowed when set */ - unsigned char group; /* Group ID for AX.25 TX interlocking */ - unsigned char mode; /* 'normal' or 'hwctrl' mode (unused) */ - unsigned char softdcd; /* Use DPLL instead of DCD pin for carrier detect */ -}; - - -/* SCC channel structure */ - -struct scc_channel { - int init; /* channel exists? */ - - struct net_device *dev; /* link to device control structure */ - struct net_device_stats dev_stat;/* device statistics */ - - char brand; /* manufacturer of the board */ - long clock; /* used clock */ - - io_port ctrl; /* I/O address of CONTROL register */ - io_port data; /* I/O address of DATA register */ - io_port special; /* I/O address of special function port */ - int irq; /* Number of Interrupt */ - - char option; - char enhanced; /* Enhanced SCC support */ - - unsigned char wreg[16]; /* Copy of last written value in WRx */ - unsigned char status; /* Copy of R0 at last external interrupt */ - unsigned char dcd; /* DCD status */ - - struct scc_kiss kiss; /* control structure for KISS params */ - struct scc_stat stat; /* statistical information */ - struct scc_modem modem; /* modem information */ - - struct sk_buff_head tx_queue; /* next tx buffer */ - struct sk_buff *rx_buff; /* pointer to frame currently received */ - struct sk_buff *tx_buff; /* pointer to frame currently transmitted */ - - /* Timer */ - struct timer_list tx_t; /* tx timer for this channel */ - struct timer_list tx_wdog; /* tx watchdogs */ - - /* Channel lock */ - spinlock_t lock; /* Channel guard lock */ -}; - -#endif /* defined(_SCC_H) */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 004e6d56a499..ee06cba5c6f5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1002,6 +1002,9 @@ 
struct task_struct { unsigned sched_rt_mutex:1; #endif + /* Save user-dumpable when mm goes away */ + unsigned user_dumpable:1; + /* Bit to tell TOMOYO we're in execve(): */ unsigned in_execve:1; unsigned in_iowait:1; @@ -1535,7 +1538,7 @@ struct task_struct { /* Used by memcontrol for targeted memcg charge: */ struct mem_cgroup *active_memcg; - /* Cache for current->cgroups->memcg->objcg lookups: */ + /* Cache for current->cgroups->memcg->nodeinfo[nid]->objcg lookups: */ struct obj_cgroup *objcg; #endif diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 1198138cb839..273538200a44 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -33,6 +33,15 @@ struct root_domain; extern void dl_add_task_root_domain(struct task_struct *p); extern void dl_clear_root_domain(struct root_domain *rd); extern void dl_clear_root_domain_cpu(int cpu); +/* + * Return whether moving DL task @p to @new_mask requires moving DL + * bandwidth accounting between root domains. This helper is specific to + * DL bandwidth move accounting semantics and is shared by + * cpuset_can_attach() and set_cpus_allowed_dl() so both paths use the + * same source root-domain test. 
+ */ +extern bool dl_task_needs_bw_move(struct task_struct *p, + const struct cpumask *new_mask); extern u64 dl_cookie; extern bool dl_bw_visited(int cpu, u64 cookie); diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 1a3af2ea2a79..2129e18ada58 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -103,21 +103,25 @@ enum scx_ent_flags { SCX_TASK_IMMED = 1 << 5, /* task is on local DSQ with %SCX_ENQ_IMMED */ /* - * Bits 8 and 9 are used to carry task state: + * Bits 8 to 10 are used to carry task state: * * NONE ops.init_task() not called yet + * INIT_BEGIN ops.init_task() in flight; see sched_ext_dead() * INIT ops.init_task() succeeded, but task can be cancelled * READY fully initialized, but not in sched_ext * ENABLED fully initialized and in sched_ext + * DEAD terminal state set by sched_ext_dead() */ - SCX_TASK_STATE_SHIFT = 8, /* bits 8 and 9 are used to carry task state */ - SCX_TASK_STATE_BITS = 2, + SCX_TASK_STATE_SHIFT = 8, + SCX_TASK_STATE_BITS = 3, SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT, SCX_TASK_NONE = 0 << SCX_TASK_STATE_SHIFT, - SCX_TASK_INIT = 1 << SCX_TASK_STATE_SHIFT, - SCX_TASK_READY = 2 << SCX_TASK_STATE_SHIFT, - SCX_TASK_ENABLED = 3 << SCX_TASK_STATE_SHIFT, + SCX_TASK_INIT_BEGIN = 1 << SCX_TASK_STATE_SHIFT, + SCX_TASK_INIT = 2 << SCX_TASK_STATE_SHIFT, + SCX_TASK_READY = 3 << SCX_TASK_STATE_SHIFT, + SCX_TASK_ENABLED = 4 << SCX_TASK_STATE_SHIFT, + SCX_TASK_DEAD = 5 << SCX_TASK_STATE_SHIFT, /* * Bits 12 and 13 are used to carry reenqueue reason. 
In addition to diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index dc3975ff1b2e..cf0fd03dd7a2 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -21,6 +21,11 @@ enum hk_type { HK_TYPE_MAX, /* + * HK_TYPE_KTHREAD is now an alias of HK_TYPE_DOMAIN + */ + HK_TYPE_KTHREAD = HK_TYPE_DOMAIN, + + /* * The following housekeeping types are only set by the nohz_full * boot commandline option. So they can share the same value. */ @@ -29,7 +34,6 @@ enum hk_type { HK_TYPE_RCU = HK_TYPE_KERNEL_NOISE, HK_TYPE_MISC = HK_TYPE_KERNEL_NOISE, HK_TYPE_WQ = HK_TYPE_KERNEL_NOISE, - HK_TYPE_KTHREAD = HK_TYPE_KERNEL_NOISE }; #ifdef CONFIG_CPU_ISOLATION diff --git a/include/linux/secure_boot.h b/include/linux/secure_boot.h new file mode 100644 index 000000000000..d17e92351567 --- /dev/null +++ b/include/linux/secure_boot.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2026 Red Hat, Inc. All Rights Reserved. + * + * Author: Coiby Xu <coxu@redhat.com> + */ + +#ifndef _LINUX_SECURE_BOOT_H +#define _LINUX_SECURE_BOOT_H + +#include <linux/types.h> + +#ifdef CONFIG_HAVE_ARCH_GET_SECUREBOOT +/* + * Returns true if the platform secure boot is enabled. + * Returns false if disabled or not supported. + */ +bool arch_get_secureboot(void); +#else +static inline bool arch_get_secureboot(void) { return false; } +#endif + +#endif /* _LINUX_SECURE_BOOT_H */ diff --git a/include/linux/serdev.h b/include/linux/serdev.h index 188c0ba62d50..b6c3d957ec15 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -37,8 +37,8 @@ struct serdev_device_ops { * @nr: Device number on serdev bus. * @ctrl: serdev controller managing this device. * @ops: Device operations. 
- * @write_comp Completion used by serdev_device_write() internally - * @write_lock Lock to serialize access when writing data + * @write_comp: Completion used by serdev_device_write() internally + * @write_lock: Lock to serialize access when writing data */ struct serdev_device { struct device dev; @@ -57,6 +57,7 @@ struct serdev_device { * structure. * @probe: binds this driver to a serdev device. * @remove: unbinds this driver from the serdev device. + * @shutdown: shut down this serdev device. */ struct serdev_device_driver { struct device_driver driver; @@ -120,7 +121,7 @@ static inline void serdev_device_set_drvdata(struct serdev_device *serdev, void /** * serdev_device_put() - decrement serdev device refcount - * @serdev serdev device. + * @serdev: serdev device. */ static inline void serdev_device_put(struct serdev_device *serdev) { @@ -148,7 +149,7 @@ static inline void serdev_controller_set_drvdata(struct serdev_controller *ctrl, /** * serdev_controller_put() - decrement controller refcount - * @ctrl serdev controller. + * @ctrl: serdev controller. 
*/ static inline void serdev_controller_put(struct serdev_controller *ctrl) { diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index f6a2d3402d76..93a0ba872ebe 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -221,20 +221,6 @@ static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof) extern bool shmem_charge(struct inode *inode, long pages); -#ifdef CONFIG_USERFAULTFD -#ifdef CONFIG_SHMEM -extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd, - struct vm_area_struct *dst_vma, - unsigned long dst_addr, - unsigned long src_addr, - uffd_flags_t flags, - struct folio **foliop); -#else /* !CONFIG_SHMEM */ -#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \ - src_addr, flags, foliop) ({ BUG(); 0; }) -#endif /* CONFIG_SHMEM */ -#endif /* CONFIG_USERFAULTFD */ - /* * Used space is stored as unsigned 64-bit value in bytes but * quota core supports only signed 64-bit values so use that diff --git a/include/linux/smbdirect.h b/include/linux/smbdirect.h new file mode 100644 index 000000000000..97f5ba730fa7 --- /dev/null +++ b/include/linux/smbdirect.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2025, Stefan Metzmacher + */ + +#ifndef __LINUX_SMBDIRECT_H__ +#define __LINUX_SMBDIRECT_H__ + +#include <linux/types.h> + +/* SMB-DIRECT buffer descriptor V1 structure [MS-SMBD] 2.2.3.1 */ +struct smbdirect_buffer_descriptor_v1 { + __le64 offset; + __le32 token; + __le32 length; +} __packed; + +/* + * Connection parameters mostly from [MS-SMBD] 3.1.1.1 + * + * These are setup and negotiated at the beginning of a + * connection and remain constant unless explicitly changed. + * + * Some values are important for the upper layer. 
+ */ +struct smbdirect_socket_parameters { + __u64 flags; +#define SMBDIRECT_FLAG_PORT_RANGE_ONLY_IB ((__u64)0x1) +#define SMBDIRECT_FLAG_PORT_RANGE_ONLY_IW ((__u64)0x2) + __u32 resolve_addr_timeout_msec; + __u32 resolve_route_timeout_msec; + __u32 rdma_connect_timeout_msec; + __u32 negotiate_timeout_msec; + __u16 initiator_depth; /* limited to U8_MAX */ + __u16 responder_resources; /* limited to U8_MAX */ + __u16 recv_credit_max; + __u16 send_credit_target; + __u32 max_send_size; + __u32 max_fragmented_send_size; + __u32 max_recv_size; + __u32 max_fragmented_recv_size; + __u32 max_read_write_size; + __u32 max_frmr_depth; + __u32 keepalive_interval_msec; + __u32 keepalive_timeout_msec; +} __packed; + +#define SMBDIRECT_FLAG_PORT_RANGE_MASK ( \ + SMBDIRECT_FLAG_PORT_RANGE_ONLY_IB | \ + SMBDIRECT_FLAG_PORT_RANGE_ONLY_IW) + +struct smbdirect_socket; +struct smbdirect_send_batch; +struct smbdirect_mr_io; + +#include <rdma/rw.h> + +u8 smbdirect_netdev_rdma_capable_node_type(struct net_device *netdev); + +bool smbdirect_frwr_is_supported(const struct ib_device_attr *attrs); + +int smbdirect_socket_create_kern(struct net *net, struct smbdirect_socket **_sc); + +int smbdirect_socket_create_accepting(struct rdma_cm_id *id, struct smbdirect_socket **_sc); + +int smbdirect_socket_set_initial_parameters(struct smbdirect_socket *sc, + const struct smbdirect_socket_parameters *sp); + +const struct smbdirect_socket_parameters * +smbdirect_socket_get_current_parameters(struct smbdirect_socket *sc); + +int smbdirect_socket_set_kernel_settings(struct smbdirect_socket *sc, + enum ib_poll_context poll_ctx, + gfp_t gfp_mask); + +#define SMBDIRECT_LOG_ERR 0x0 +#define SMBDIRECT_LOG_INFO 0x1 + +#define SMBDIRECT_LOG_OUTGOING 0x1 +#define SMBDIRECT_LOG_INCOMING 0x2 +#define SMBDIRECT_LOG_READ 0x4 +#define SMBDIRECT_LOG_WRITE 0x8 +#define SMBDIRECT_LOG_RDMA_SEND 0x10 +#define SMBDIRECT_LOG_RDMA_RECV 0x20 +#define SMBDIRECT_LOG_KEEP_ALIVE 0x40 +#define SMBDIRECT_LOG_RDMA_EVENT 0x80 +#define 
SMBDIRECT_LOG_RDMA_MR 0x100 +#define SMBDIRECT_LOG_RDMA_RW 0x200 +#define SMBDIRECT_LOG_NEGOTIATE 0x400 +void smbdirect_socket_set_logging(struct smbdirect_socket *sc, + void *private_ptr, + bool (*needed)(struct smbdirect_socket *sc, + void *private_ptr, + unsigned int lvl, + unsigned int cls), + void (*vaprintf)(struct smbdirect_socket *sc, + const char *func, + unsigned int line, + void *private_ptr, + unsigned int lvl, + unsigned int cls, + struct va_format *vaf)); + +bool smbdirect_connection_is_connected(struct smbdirect_socket *sc); + +int smbdirect_connection_wait_for_connected(struct smbdirect_socket *sc); + +int smbdirect_socket_bind(struct smbdirect_socket *sc, struct sockaddr *addr); + +void smbdirect_socket_shutdown(struct smbdirect_socket *sc); + +void smbdirect_socket_release(struct smbdirect_socket *sc); + +int smbdirect_connection_send_batch_flush(struct smbdirect_socket *sc, + struct smbdirect_send_batch *batch, + bool is_last); + +/* + * This is only temporary and only needed + * as long as the client still requires + * to use smbdirect_connection_send_single_iter() + */ +struct smbdirect_send_batch_storage { + union { + struct list_head __msg_list; + __aligned_u64 __space[5]; + }; +}; + +struct smbdirect_send_batch * +smbdirect_init_send_batch_storage(struct smbdirect_send_batch_storage *storage, + bool need_invalidate_rkey, + unsigned int remote_key); + +int smbdirect_connection_send_single_iter(struct smbdirect_socket *sc, + struct smbdirect_send_batch *batch, + struct iov_iter *iter, + unsigned int flags, + u32 remaining_data_length); + +int smbdirect_connection_send_wait_zero_pending(struct smbdirect_socket *sc); + +int smbdirect_connection_send_iter(struct smbdirect_socket *sc, + struct iov_iter *iter, + unsigned int flags, + bool need_invalidate, + unsigned int remote_key); + +int smbdirect_connection_recvmsg(struct smbdirect_socket *sc, + struct msghdr *msg, + unsigned int flags); + +int smbdirect_connect(struct smbdirect_socket *sc, + 
const struct sockaddr *dst); + +int smbdirect_connect_sync(struct smbdirect_socket *sc, + const struct sockaddr *dst); + +int smbdirect_socket_listen(struct smbdirect_socket *sc, int backlog); + +struct smbdirect_socket *smbdirect_socket_accept(struct smbdirect_socket *lsc, + long timeo, + struct proto_accept_arg *arg); + +int smbdirect_connection_rdma_xmit(struct smbdirect_socket *sc, + void *buf, size_t buf_len, + struct smbdirect_buffer_descriptor_v1 *desc, + size_t desc_len, + bool is_read); + +struct smbdirect_mr_io * +smbdirect_connection_register_mr_io(struct smbdirect_socket *sc, + struct iov_iter *iter, + bool writing, + bool need_invalidate); + +void smbdirect_mr_io_fill_buffer_descriptor(struct smbdirect_mr_io *mr, + struct smbdirect_buffer_descriptor_v1 *v1); + +void smbdirect_connection_deregister_mr_io(struct smbdirect_mr_io *mr); + +void smbdirect_connection_legacy_debug_proc_show(struct smbdirect_socket *sc, + unsigned int rdma_readwrite_threshold, + struct seq_file *m); + +#endif /* __LINUX_SMBDIRECT_H__ */ diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h index 6e1b1202e818..58fa1df96347 100644 --- a/include/linux/soc/qcom/apr.h +++ b/include/linux/soc/qcom/apr.h @@ -191,7 +191,7 @@ int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt); gpr_port_t *gpr_alloc_port(gpr_device_t *gdev, struct device *dev, gpr_port_cb cb, void *priv); void gpr_free_port(gpr_port_t *port); -int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt); -int gpr_send_pkt(gpr_device_t *gdev, struct gpr_pkt *pkt); +int gpr_send_port_pkt(gpr_port_t *port, const struct gpr_pkt *pkt); +int gpr_send_pkt(gpr_device_t *gdev, const struct gpr_pkt *pkt); #endif /* __QCOM_APR_H_ */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 7587b1c5d7ec..79513f5941cc 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -701,7 +701,7 @@ struct spi_controller { int (*transfer)(struct spi_device *spi, struct spi_message *mesg); 
- /* Called on release() to free memory provided by spi_controller */ + /* Called on deregistration to free memory provided by spi_controller */ void (*cleanup)(struct spi_device *spi); /* @@ -1019,7 +1019,7 @@ struct spi_res { * this value may have changed compared to what was requested, depending * on the available snapshotting resolution (DMA transfer, * @ptp_sts_supported is false, etc). - * @ptp_sts_word_post: See @ptp_sts_word_post. The two can be equal (meaning + * @ptp_sts_word_post: See @ptp_sts_word_pre. The two can be equal (meaning * that a single byte should be snapshotted). * If the core takes care of the timestamp (if @ptp_sts_supported is false * for this controller), it will set @ptp_sts_word_pre to 0, and diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 72820503514c..01011113d226 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -99,7 +99,7 @@ static inline void print_stop_info(const char *log_lvl, struct task_struct *task * stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr to pass to @fn() - * @cpus: the cpus to run @fn() on (NULL = run on each online CPU) + * @cpus: the cpus to run @fn() on (NULL = one unspecified online CPU) * * Description: This causes a thread to be scheduled on every CPU, which * will run with interrupts disabled. Each CPU specified by @cpus will @@ -133,7 +133,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr to pass to @fn() - * @cpus: the cpus to run @fn() on (NULL = run on each online CPU) + * @cpus: the cpus to run @fn() on (NULL = one unspecified online CPU) * * Same as above. Avoids nested calls to cpus_read_lock(). 
* diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index e783132e481f..b1e595c2615b 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -16,6 +16,7 @@ #include <linux/atomic.h> #include <linux/kstrtox.h> #include <linux/proc_fs.h> +#include <linux/wait.h> /* * Each cache requires: @@ -112,7 +113,11 @@ struct cache_detail { int entries; /* fields for communication over channel */ - struct list_head queue; + struct list_head requests; + struct list_head readers; + spinlock_t queue_lock; + wait_queue_head_t queue_wait; + u64 next_seqno; atomic_t writers; /* how many time is /channel open */ time64_t last_close; /* if no writers, when did last close */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index eb4bd62df319..ab61bed2f7af 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -38,6 +38,8 @@ extern unsigned int nlm_debug; do { \ ifdebug(fac) \ __sunrpc_printk(fmt, ##__VA_ARGS__); \ + else \ + no_printk(fmt, ##__VA_ARGS__); \ } while (0) # define dfprintk_rcu(fac, fmt, ...) \ @@ -46,15 +48,15 @@ do { \ rcu_read_lock(); \ __sunrpc_printk(fmt, ##__VA_ARGS__); \ rcu_read_unlock(); \ + } else { \ + no_printk(fmt, ##__VA_ARGS__); \ } \ } while (0) -# define RPC_IFDEBUG(x) x #else # define ifdebug(fac) if (0) -# define dfprintk(fac, fmt, ...) do {} while (0) -# define dfprintk_rcu(fac, fmt, ...) do {} while (0) -# define RPC_IFDEBUG(x) +# define dfprintk(fac, fmt, ...) no_printk(fmt, ##__VA_ARGS__) +# define dfprintk_rcu(fac, fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) #endif /* diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index ccba79ebf893..0dbdf3722537 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -95,10 +95,7 @@ struct rpc_task { int tk_rpc_status; /* Result of last RPC operation */ unsigned short tk_flags; /* misc flags */ unsigned short tk_timeouts; /* maj timeouts */ - -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) unsigned short tk_pid; /* debugging aid */ -#endif unsigned char tk_priority : 2,/* Task priority */ tk_garb_retry : 2, tk_cred_retry : 2; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index a11acf5cd63b..4be6204f6630 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -134,25 +134,37 @@ enum { extern u32 svc_max_payload(const struct svc_rqst *rqstp); /* - * RPC Requests and replies are stored in one or more pages. - * We maintain an array of pages for each server thread. - * Requests are copied into these pages as they arrive. Remaining - * pages are available to write the reply into. + * RPC Call and Reply messages each have their own page array. + * rq_pages holds the incoming Call message; rq_respages holds + * the outgoing Reply message. Both arrays are sized to + * svc_serv_maxpages() entries and are allocated dynamically. * - * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread - * needs to allocate more to replace those used in sending. To help keep track - * of these pages we have a receive list where all pages initialy live, and a - * send list where pages are moved to when there are to be part of a reply. + * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each + * server thread needs to allocate more to replace those used in + * sending. 
* - * We use xdr_buf for holding responses as it fits well with NFS - * read responses (that have a header, and some data pages, and possibly - * a tail) and means we can share some client side routines. + * rq_pages request page contract: * - * The xdr_buf.head kvec always points to the first page in the rq_*pages - * list. The xdr_buf.pages pointer points to the second page on that - * list. xdr_buf.tail points to the end of the first page. - * This assumes that the non-page part of an rpc reply will fit - * in a page - NFSd ensures this. lockd also has no trouble. + * Transport receive paths that move request data pages out of + * rq_pages -- TCP multi-fragment reassembly (svc_tcp_save_pages) + * and RDMA Read I/O (svc_rdma_clear_rqst_pages) -- NULL those + * entries to prevent svc_rqst_release_pages() from freeing pages + * still in transport use, and set rq_pages_nfree to the count. + * svc_alloc_arg() refills only that many rq_pages entries. + * + * For rq_respages, svc_rqst_release_pages() NULLs entries in + * [rq_respages, rq_next_page) after each RPC. svc_alloc_arg() + * refills only that range. + * + * xdr_buf holds responses; the structure fits NFS read responses + * (header, data pages, optional tail) and enables sharing of + * client-side routines. + * + * The xdr_buf.head kvec always points to the first page in the + * rq_*pages list. The xdr_buf.pages pointer points to the second + * page on that list. xdr_buf.tail points to the end of the first + * page. This assumes that the non-page part of an rpc reply will + * fit in a page - NFSd ensures this. lockd also has no trouble. */ /** @@ -162,10 +174,10 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp); * Returns a count of pages or vectors that can hold the maximum * size RPC message for @serv. * - * Each request/reply pair can have at most one "payload", plus two - * pages, one for the request, and one for the reply. 
- * nfsd_splice_actor() might need an extra page when a READ payload - * is not page-aligned. + * Each page array can hold at most one payload plus two + * overhead pages (one for the RPC header, one for tail data). + * nfsd_splice_actor() might need an extra page when a READ + * payload is not page-aligned. */ static inline unsigned long svc_serv_maxpages(const struct svc_serv *serv) { @@ -175,6 +187,9 @@ static inline unsigned long svc_serv_maxpages(const struct svc_serv *serv) /* * The context of a single thread, including the request currently being * processed. + * + * RPC programs are free to use rq_private to stash thread-local information. + * The sunrpc layer will not access it. */ struct svc_rqst { struct list_head rq_all; /* all threads list */ @@ -201,11 +216,12 @@ struct svc_rqst { struct xdr_stream rq_res_stream; struct folio *rq_scratch_folio; struct xdr_buf rq_res; - unsigned long rq_maxpages; /* num of entries in rq_pages */ - struct page * *rq_pages; - struct page * *rq_respages; /* points into rq_pages */ + unsigned long rq_maxpages; /* entries per page array */ + unsigned long rq_pages_nfree; /* rq_pages entries NULLed by transport */ + struct page * *rq_pages; /* Call buffer pages */ + struct page * *rq_respages; /* Reply buffer pages */ struct page * *rq_next_page; /* next reply page to use */ - struct page * *rq_page_end; /* one past the last page */ + struct page * *rq_page_end; /* one past the last reply page */ struct folio_batch rq_fbatch; struct bio_vec *rq_bvec; @@ -215,7 +231,6 @@ struct svc_rqst { u32 rq_vers; /* program version */ u32 rq_proc; /* procedure number */ u32 rq_prot; /* IP protocol */ - int rq_cachetype; /* catering to nfsd */ unsigned long rq_flags; /* flags field */ ktime_t rq_qtime; /* enqueue time */ @@ -251,7 +266,7 @@ struct svc_rqst { unsigned long bc_to_initval; unsigned int bc_to_retries; unsigned int rq_status_counter; /* RPC processing counter */ - void **rq_lease_breaker; /* The v4 client breaking a lease */ + 
void *rq_private; /* For use by the service thread */ }; /* bits for rq_flags */ @@ -483,6 +498,21 @@ int svc_generic_rpcbind_set(struct net *net, #define RPC_MAX_ADDRBUFLEN (63U) +/** + * svc_rqst_page_release - release a page associated with an RPC transaction + * @rqstp: RPC transaction context + * @page: page to release + * + * Released pages are batched and freed together, reducing + * allocator pressure under heavy RPC workloads. + */ +static inline void svc_rqst_page_release(struct svc_rqst *rqstp, + struct page *page) +{ + if (!folio_batch_add(&rqstp->rq_fbatch, page_folio(page))) + __folio_batch_release(&rqstp->rq_fbatch); +} + /* * When we want to reduce the size of the reserved space in the response * buffer, we need to take into account the size of any checksum data that diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 57f4fd94166a..df6e08aaad57 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -84,6 +84,9 @@ struct svcxprt_rdma { atomic_t sc_sq_avail; /* SQEs ready to be consumed */ unsigned int sc_sq_depth; /* Depth of SQ */ + atomic_t sc_sq_ticket_head; /* Next ticket to issue */ + atomic_t sc_sq_ticket_tail; /* Ticket currently serving */ + wait_queue_head_t sc_sq_ticket_wait; /* Ticket ordering waitlist */ __be32 sc_fc_credits; /* Forward credits */ u32 sc_max_requests; /* Max requests */ u32 sc_max_bc_requests;/* Backward credits */ @@ -213,6 +216,7 @@ struct svc_rdma_recv_ctxt { */ struct svc_rdma_write_info { struct svcxprt_rdma *wi_rdma; + struct list_head wi_list; const struct svc_rdma_chunk *wi_chunk; @@ -241,7 +245,10 @@ struct svc_rdma_send_ctxt { struct ib_cqe sc_cqe; struct xdr_buf sc_hdrbuf; struct xdr_stream sc_stream; + + struct list_head sc_write_info_list; struct svc_rdma_write_info sc_reply_info; + void *sc_xprt_buf; int sc_page_count; int sc_cur_sge_no; @@ -274,11 +281,14 @@ extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma, extern void 
svc_rdma_cc_release(struct svcxprt_rdma *rdma, struct svc_rdma_chunk_ctxt *cc, enum dma_data_direction dir); +extern void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt); extern void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); -extern int svc_rdma_send_write_list(struct svcxprt_rdma *rdma, - const struct svc_rdma_recv_ctxt *rctxt, - const struct xdr_buf *xdr); +extern int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma, + const struct svc_rdma_recv_ctxt *rctxt, + struct svc_rdma_send_ctxt *sctxt, + const struct xdr_buf *xdr); extern int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma, const struct svc_rdma_pcl *write_pcl, const struct svc_rdma_pcl *reply_pcl, @@ -306,6 +316,13 @@ extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *rctxt, int status); extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail); +extern int svc_rdma_sq_wait(struct svcxprt_rdma *rdma, + const struct rpc_rdma_cid *cid, int sqecount); +extern int svc_rdma_post_send_err(struct svcxprt_rdma *rdma, + const struct rpc_rdma_cid *cid, + const struct ib_send_wr *bad_wr, + const struct ib_send_wr *first_wr, + int sqecount, int ret); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 152597750f55..b639a6fafcbc 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -290,7 +290,7 @@ xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen) /** * xdr_set_scratch_folio - Attach a scratch buffer for decoding data * @xdr: pointer to xdr_stream struct - * @page: an anonymous folio + * @folio: an anonymous folio * * See xdr_set_scratch_buffer(). 
*/ @@ -330,7 +330,7 @@ static inline void xdr_commit_encode(struct xdr_stream *xdr) * xdr_stream_remaining - Return the number of bytes remaining in the stream * @xdr: pointer to struct xdr_stream * - * Return value: + * Returns: * Number of bytes remaining in @xdr before xdr->end */ static inline size_t @@ -350,7 +350,7 @@ ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor, * xdr_align_size - Calculate padded size of an object * @n: Size of an object being XDR encoded (in bytes) * - * Return value: + * Returns: * Size (in bytes) of the object including xdr padding */ static inline size_t @@ -368,7 +368,7 @@ xdr_align_size(size_t n) * This implementation avoids the need for conditional * branches or modulo division. * - * Return value: + * Returns: * Size (in bytes) of the needed XDR pad */ static inline size_t xdr_pad_size(size_t n) @@ -380,7 +380,7 @@ static inline size_t xdr_pad_size(size_t n) * xdr_stream_encode_item_present - Encode a "present" list item * @xdr: pointer to xdr_stream * - * Return values: + * Returns: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -399,7 +399,7 @@ static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr) * xdr_stream_encode_item_absent - Encode a "not present" list item * @xdr: pointer to xdr_stream * - * Return values: + * Returns: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -419,7 +419,7 @@ static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr) * @p: address in a buffer into which to encode * @n: boolean value to encode * - * Return value: + * Returns: * Address of item following the encoded boolean */ static inline __be32 *xdr_encode_bool(__be32 *p, u32 n) @@ -433,7 +433,7 @@ static inline __be32 *xdr_encode_bool(__be32 *p, u32 n) * @xdr: pointer to xdr_stream * @n: boolean value to encode * - * Return values: + * Returns: * On success, returns length in 
bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -453,7 +453,7 @@ static inline int xdr_stream_encode_bool(struct xdr_stream *xdr, __u32 n) * @xdr: pointer to xdr_stream * @n: integer to encode * - * Return values: + * Returns: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -474,7 +474,7 @@ xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) * @xdr: pointer to xdr_stream * @n: integer to encode * - * Return values: + * Returns: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -495,7 +495,7 @@ xdr_stream_encode_be32(struct xdr_stream *xdr, __be32 n) * @xdr: pointer to xdr_stream * @n: 64-bit integer to encode * - * Return values: + * Returns: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -517,7 +517,7 @@ xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) * @ptr: pointer to void pointer * @len: size of object * - * Return values: + * Returns: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -542,7 +542,7 @@ xdr_stream_encode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t len) * @ptr: pointer to opaque data object * @len: size of object pointed to by @ptr * - * Return values: + * Returns: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -563,7 +563,7 @@ xdr_stream_encode_opaque_fixed(struct xdr_stream *xdr, const void *ptr, size_t l * @ptr: pointer to opaque data object * @len: size of object pointed to by @ptr * - * Return values: + * Returns: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -585,7 +585,7 @@ xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) * @array: array of integers * @array_size: number of elements in @array * - * Return values: + * Returns: * On success, 
returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ @@ -608,7 +608,7 @@ xdr_stream_encode_uint32_array(struct xdr_stream *xdr, * xdr_item_is_absent - symbolically handle XDR discriminators * @p: pointer to undecoded discriminator * - * Return values: + * Returns: * %true if the following XDR item is absent * %false if the following XDR item is present */ @@ -621,7 +621,7 @@ static inline bool xdr_item_is_absent(const __be32 *p) * xdr_item_is_present - symbolically handle XDR discriminators * @p: pointer to undecoded discriminator * - * Return values: + * Returns: * %true if the following XDR item is present * %false if the following XDR item is absent */ @@ -635,7 +635,7 @@ static inline bool xdr_item_is_present(const __be32 *p) * @xdr: pointer to xdr_stream * @ptr: pointer to a u32 in which to store the result * - * Return values: + * Returns: * %0 on success * %-EBADMSG on XDR buffer overflow */ @@ -656,7 +656,7 @@ xdr_stream_decode_bool(struct xdr_stream *xdr, __u32 *ptr) * @xdr: pointer to xdr_stream * @ptr: location to store integer * - * Return values: + * Returns: * %0 on success * %-EBADMSG on XDR buffer overflow */ @@ -677,7 +677,7 @@ xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) * @xdr: pointer to xdr_stream * @ptr: location to store integer * - * Return values: + * Returns: * %0 on success * %-EBADMSG on XDR buffer overflow */ @@ -698,7 +698,7 @@ xdr_stream_decode_be32(struct xdr_stream *xdr, __be32 *ptr) * @xdr: pointer to xdr_stream * @ptr: location to store 64-bit integer * - * Return values: + * Returns: * %0 on success * %-EBADMSG on XDR buffer overflow */ @@ -720,7 +720,7 @@ xdr_stream_decode_u64(struct xdr_stream *xdr, __u64 *ptr) * @ptr: location to store data * @len: size of buffer pointed to by @ptr * - * Return values: + * Returns: * %0 on success * %-EBADMSG on XDR buffer overflow */ @@ -746,7 +746,7 @@ xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len) * on @xdr. 
It is therefore expected that the object it points to should * be processed immediately. * - * Return values: + * Returns: * On success, returns size of object stored in *@ptr * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE if the size of the object would exceed @maxlen @@ -777,7 +777,7 @@ xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxle * @array: location to store the integer array or NULL * @array_size: number of elements to store * - * Return values: + * Returns: * On success, returns number of elements stored in @array * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE if the size of the array exceeds @array_size diff --git a/include/linux/sunrpc/xdrgen/nlm4.h b/include/linux/sunrpc/xdrgen/nlm4.h new file mode 100644 index 000000000000..e95e8f105624 --- /dev/null +++ b/include/linux/sunrpc/xdrgen/nlm4.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Generated by xdrgen. Manual edits will be lost. */ +/* XDR specification file: ../../Documentation/sunrpc/xdr/nlm4.x */ +/* XDR specification modification time: Thu Dec 25 13:10:19 2025 */ + +#ifndef _LINUX_XDRGEN_NLM4_DEF_H +#define _LINUX_XDRGEN_NLM4_DEF_H + +#include <linux/types.h> +#include <linux/sunrpc/xdrgen/_defs.h> + +enum { LM_MAXSTRLEN = 1024 }; + +enum { LM_MAXNAMELEN = 1025 }; + +enum { MAXNETOBJ_SZ = 1024 }; + +typedef opaque netobj; + +enum fsh4_mode { + fsm_DN = 0, + fsm_DR = 1, + fsm_DW = 2, + fsm_DRW = 3, +}; + +typedef enum fsh4_mode fsh4_mode; + +enum fsh4_access { + fsa_NONE = 0, + fsa_R = 1, + fsa_W = 2, + fsa_RW = 3, +}; + +typedef enum fsh4_access fsh4_access; + +enum { SM_MAXSTRLEN = 1024 }; + +typedef u64 uint64; + +typedef s64 int64; + +typedef u32 uint32; + +typedef s32 int32; + +enum nlm4_stats { + NLM4_GRANTED = 0, + NLM4_DENIED = 1, + NLM4_DENIED_NOLOCKS = 2, + NLM4_BLOCKED = 3, + NLM4_DENIED_GRACE_PERIOD = 4, + NLM4_DEADLCK = 5, + NLM4_ROFS = 6, + NLM4_STALE_FH = 7, + NLM4_FBIG = 8, + NLM4_FAILED = 9, +}; + +typedef __be32 nlm4_stats; + 
+struct nlm4_holder { + bool exclusive; + int32 svid; + netobj oh; + uint64 l_offset; + uint64 l_len; +}; + +struct nlm4_testrply { + nlm4_stats stat; + union { + struct nlm4_holder holder; + } u; +}; + +struct nlm4_stat { + nlm4_stats stat; +}; + +struct nlm4_res { + netobj cookie; + struct nlm4_stat stat; +}; + +struct nlm4_testres { + netobj cookie; + struct nlm4_testrply stat; +}; + +struct nlm4_lock { + string caller_name; + netobj fh; + netobj oh; + int32 svid; + uint64 l_offset; + uint64 l_len; +}; + +struct nlm4_lockargs { + netobj cookie; + bool block; + bool exclusive; + struct nlm4_lock alock; + bool reclaim; + int32 state; +}; + +struct nlm4_cancargs { + netobj cookie; + bool block; + bool exclusive; + struct nlm4_lock alock; +}; + +struct nlm4_testargs { + netobj cookie; + bool exclusive; + struct nlm4_lock alock; +}; + +struct nlm4_unlockargs { + netobj cookie; + struct nlm4_lock alock; +}; + +struct nlm4_share { + string caller_name; + netobj fh; + netobj oh; + fsh4_mode mode; + fsh4_access access; +}; + +struct nlm4_shareargs { + netobj cookie; + struct nlm4_share share; + bool reclaim; +}; + +struct nlm4_shareres { + netobj cookie; + nlm4_stats stat; + int32 sequence; +}; + +struct nlm4_notify { + string name; + int32 state; +}; + +enum { SM_PRIV_SIZE = 16 }; + +struct nlm4_notifyargs { + struct nlm4_notify notify; + u8 private[SM_PRIV_SIZE]; +}; + +enum { + NLMPROC4_NULL = 0, + NLMPROC4_TEST = 1, + NLMPROC4_LOCK = 2, + NLMPROC4_CANCEL = 3, + NLMPROC4_UNLOCK = 4, + NLMPROC4_GRANTED = 5, + NLMPROC4_TEST_MSG = 6, + NLMPROC4_LOCK_MSG = 7, + NLMPROC4_CANCEL_MSG = 8, + NLMPROC4_UNLOCK_MSG = 9, + NLMPROC4_GRANTED_MSG = 10, + NLMPROC4_TEST_RES = 11, + NLMPROC4_LOCK_RES = 12, + NLMPROC4_CANCEL_RES = 13, + NLMPROC4_UNLOCK_RES = 14, + NLMPROC4_GRANTED_RES = 15, + NLMPROC4_SM_NOTIFY = 16, + NLMPROC4_SHARE = 20, + NLMPROC4_UNSHARE = 21, + NLMPROC4_NM_LOCK = 22, + NLMPROC4_FREE_ALL = 23, +}; + +#ifndef NLM4_PROG +#define NLM4_PROG (100021) +#endif + +#define 
NLM4_netobj_sz (XDR_unsigned_int + XDR_QUADLEN(MAXNETOBJ_SZ)) +#define NLM4_fsh4_mode_sz (XDR_int) +#define NLM4_fsh4_access_sz (XDR_int) +#define NLM4_uint64_sz \ + (XDR_unsigned_hyper) +#define NLM4_int64_sz \ + (XDR_hyper) +#define NLM4_uint32_sz \ + (XDR_unsigned_long) +#define NLM4_int32_sz \ + (XDR_long) +#define NLM4_nlm4_stats_sz (XDR_int) +#define NLM4_nlm4_holder_sz \ + (XDR_bool + NLM4_int32_sz + NLM4_netobj_sz + NLM4_uint64_sz + NLM4_uint64_sz) +#define NLM4_nlm4_testrply_sz \ + (NLM4_nlm4_stats_sz + NLM4_nlm4_holder_sz) +#define NLM4_nlm4_stat_sz \ + (NLM4_nlm4_stats_sz) +#define NLM4_nlm4_res_sz \ + (NLM4_netobj_sz + NLM4_nlm4_stat_sz) +#define NLM4_nlm4_testres_sz \ + (NLM4_netobj_sz + NLM4_nlm4_testrply_sz) +#define NLM4_nlm4_lock_sz \ + (XDR_unsigned_int + XDR_QUADLEN(LM_MAXSTRLEN) + NLM4_netobj_sz + NLM4_netobj_sz + NLM4_int32_sz + NLM4_uint64_sz + NLM4_uint64_sz) +#define NLM4_nlm4_lockargs_sz \ + (NLM4_netobj_sz + XDR_bool + XDR_bool + NLM4_nlm4_lock_sz + XDR_bool + NLM4_int32_sz) +#define NLM4_nlm4_cancargs_sz \ + (NLM4_netobj_sz + XDR_bool + XDR_bool + NLM4_nlm4_lock_sz) +#define NLM4_nlm4_testargs_sz \ + (NLM4_netobj_sz + XDR_bool + NLM4_nlm4_lock_sz) +#define NLM4_nlm4_unlockargs_sz \ + (NLM4_netobj_sz + NLM4_nlm4_lock_sz) +#define NLM4_nlm4_share_sz \ + (XDR_unsigned_int + XDR_QUADLEN(LM_MAXSTRLEN) + NLM4_netobj_sz + NLM4_netobj_sz + NLM4_fsh4_mode_sz + NLM4_fsh4_access_sz) +#define NLM4_nlm4_shareargs_sz \ + (NLM4_netobj_sz + NLM4_nlm4_share_sz + XDR_bool) +#define NLM4_nlm4_shareres_sz \ + (NLM4_netobj_sz + NLM4_nlm4_stats_sz + NLM4_int32_sz) +#define NLM4_nlm4_notify_sz \ + (XDR_unsigned_int + XDR_QUADLEN(LM_MAXNAMELEN) + NLM4_int32_sz) +#define NLM4_nlm4_notifyargs_sz \ + (NLM4_nlm4_notify_sz + XDR_QUADLEN(SM_PRIV_SIZE)) +#define NLM4_MAX_ARGS_SZ \ + (NLM4_nlm4_lockargs_sz) + +#endif /* _LINUX_XDRGEN_NLM4_DEF_H */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index f46d1fb8f71a..a82045804d34 100644 --- 
a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -404,6 +404,8 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size, unsigned int max_req); void xprt_free(struct rpc_xprt *); void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_add_backlog_noncongested(struct rpc_xprt *xprt, + struct rpc_task *task); bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req); void xprt_cleanup_ids(void); diff --git a/include/linux/swap.h b/include/linux/swap.h index 4b1f13b5bbad..7a09df6977a5 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -310,8 +310,7 @@ extern unsigned long totalreserve_pages; /* linux/mm/swap.c */ void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file, - unsigned int nr_io, unsigned int nr_rotated) - __releases(lruvec->lru_lock); + unsigned int nr_io, unsigned int nr_rotated); void lru_note_cost_refault(struct folio *); void folio_add_lru(struct folio *); void folio_add_lru_vma(struct folio *, struct vm_area_struct *); @@ -353,6 +352,7 @@ extern void swap_setup(void); extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); +unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); #define MEMCG_RECLAIM_MAY_SWAP (1 << 1) #define MEMCG_RECLAIM_PROACTIVE (1 << 2) @@ -547,6 +547,8 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) return READ_ONCE(memcg->swappiness); } + +void lru_reparent_memcg(struct mem_cgroup *memcg, struct mem_cgroup *parent, int nid); #else static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) { @@ -611,5 +613,24 @@ static inline bool mem_cgroup_swap_full(struct folio *folio) } #endif +/* for_each_managed_zone_pgdat - helper macro to iterate over all managed zones in a pgdat up to + * and including the specified highidx + * @zone: The current zone in the iterator + * @pgdat: The 
pgdat which node_zones are being iterated + * @idx: The index variable + * @highidx: The index of the highest zone to return + * + * This macro iterates through all managed zones up to and including the specified highidx. + * The zone iterator enters an invalid state after macro call and must be reinitialized + * before it can be used again. + */ +#define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \ + for ((idx) = 0, (zone) = (pgdat)->node_zones; \ + (idx) <= (highidx); \ + (idx)++, (zone)++) \ + if (!managed_zone(zone)) \ + continue; \ + else + #endif /* __KERNEL__*/ #endif /* _LINUX_SWAP_H */ diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 2c59fe3efcd4..b6c2496866ea 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -4,6 +4,7 @@ #include <linux/types.h> #include <linux/blk-mq.h> +#include <linux/wordpart.h> /* * A T10 PI-capable target device can be formatted with different @@ -25,6 +26,16 @@ enum t10_dif_type { T10_PI_TYPE3_PROTECTION = 0x3, }; +static inline u64 full_pi_ref_tag(const struct request *rq) +{ + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && + rq->q->limits.integrity.interval_exp) + shift = rq->q->limits.integrity.interval_exp; + return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT); +} + /* * T10 Protection Information tuple. 
*/ @@ -39,12 +50,7 @@ struct t10_pi_tuple { static inline u32 t10_pi_ref_tag(struct request *rq) { - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); - - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && - rq->q->limits.integrity.interval_exp) - shift = rq->q->limits.integrity.interval_exp; - return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; + return lower_32_bits(full_pi_ref_tag(rq)); } struct crc64_pi_tuple { @@ -64,12 +70,7 @@ static inline u64 lower_48_bits(u64 n) static inline u64 ext_pi_ref_tag(struct request *rq) { - unsigned int shift = ilog2(queue_logical_block_size(rq->q)); - - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && - rq->q->limits.integrity.interval_exp) - shift = rq->q->limits.integrity.interval_exp; - return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT)); + return lower_48_bits(full_pi_ref_tag(rq)); } #endif diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 891368e82558..aff8ea2fa98e 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -131,11 +131,16 @@ struct tcg_algorithm_info { }; #ifndef TPM_MEMREMAP -#define TPM_MEMREMAP(start, size) NULL +static inline void *TPM_MEMREMAP(unsigned long start, size_t size) +{ + return NULL; +} #endif #ifndef TPM_MEMUNMAP -#define TPM_MEMUNMAP(start, size) do{} while(0) +static inline void TPM_MEMUNMAP(void *mapping, size_t size) +{ +} #endif /** diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 578e520b6ee6..763eea4d80d8 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -202,7 +202,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define TP_CONDITION(args...) args /* - * Individual subsystem my have a separate configuration to + * Individual subsystem may have a separate configuration to * enable their tracepoints. By default, this file will create * the tracepoints if CONFIG_TRACEPOINTS is defined. 
If a subsystem * wants to be able to disable its tracepoints from being created diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h index 31125e3be3c5..48adcb0e8ff3 100644 --- a/include/linux/tty_buffer.h +++ b/include/linux/tty_buffer.h @@ -34,6 +34,7 @@ static inline u8 *flag_buf_ptr(struct tty_buffer *b, unsigned int ofs) struct tty_bufhead { struct tty_buffer *head; /* Queue head */ + struct workqueue_struct *flip_wq; struct work_struct work; struct mutex lock; atomic_t priority; diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 188ee9b768eb..1f2896e56e77 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -69,6 +69,10 @@ struct serial_struct; * Do not create numbered ``/dev`` nodes. For example, create * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a * driver for a single tty device is being allocated. + * + * @TTY_DRIVER_NO_WORKQUEUE: + * Do not create workqueue when tty_register_driver(). Whenever set, flip + * buffer workqueue can be set by tty_port_link_wq() for every port. 
*/ enum tty_driver_flag { TTY_DRIVER_INSTALLED = BIT(0), @@ -79,6 +83,7 @@ enum tty_driver_flag { TTY_DRIVER_HARDWARE_BREAK = BIT(5), TTY_DRIVER_DYNAMIC_ALLOC = BIT(6), TTY_DRIVER_UNNUMBERED_NODE = BIT(7), + TTY_DRIVER_NO_WORKQUEUE = BIT(8), }; enum tty_driver_type { @@ -506,6 +511,7 @@ struct tty_operations { * @flags: tty driver flags (%TTY_DRIVER_) * @proc_entry: proc fs entry, used internally * @other: driver of the linked tty; only used for the PTY driver + * @flip_wq: workqueue to queue flip buffer work on * @ttys: array of active &struct tty_struct, set by tty_standard_install() * @ports: array of &struct tty_port; can be set during initialization by * tty_port_link_device() and similar @@ -539,6 +545,7 @@ struct tty_driver { unsigned long flags; struct proc_dir_entry *proc_entry; struct tty_driver *other; + struct workqueue_struct *flip_wq; /* * Pointer to the tty data structures diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index c5cccc3fc1e8..d227a58e3e49 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -266,7 +266,7 @@ struct tty_ldisc_ops { }; struct tty_ldisc { - struct tty_ldisc_ops *ops; + const struct tty_ldisc_ops *ops; struct tty_struct *tty; }; @@ -281,8 +281,8 @@ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); void tty_ldisc_flush(struct tty_struct *tty); -int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc); -void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc); +int tty_register_ldisc(const struct tty_ldisc_ops *new_ldisc); +void tty_unregister_ldisc(const struct tty_ldisc_ops *ldisc); int tty_set_ldisc(struct tty_struct *tty, int disc); #endif /* _LINUX_TTY_LDISC_H */ diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h index 660c254f1efe..d2a7882c0b58 100644 --- a/include/linux/tty_port.h +++ b/include/linux/tty_port.h @@ -138,6 +138,7 @@ struct tty_port { kernel */ void tty_port_init(struct tty_port *port); +void tty_port_link_wq(struct tty_port *port, struct 
workqueue_struct *flip_wq); void tty_port_link_device(struct tty_port *port, struct tty_driver *driver, unsigned index); struct device *tty_port_register_device(struct tty_port *port, @@ -165,6 +166,18 @@ static inline struct tty_port *tty_port_get(struct tty_port *port) return NULL; } +/* + * Never overwrite the workqueue set by tty_port_link_wq(). + * No effect when %TTY_DRIVER_NO_WORKQUEUE is set, as driver->flip_wq is + * %NULL. + */ +static inline void tty_port_link_driver_wq(struct tty_port *port, + struct tty_driver *driver) +{ + if (!port->buf.flip_wq) + tty_port_link_wq(port, driver->flip_wq); +} + /* If the cts flow control is enabled, return true. */ static inline bool tty_port_cts_enabled(const struct tty_port *port) { diff --git a/include/linux/usb.h b/include/linux/usb.h index 4aab20015851..25a203ac7a7e 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -55,7 +55,8 @@ struct ep_device; * @eusb2_isoc_ep_comp: eUSB2 isoc companion descriptor for this endpoint * @urb_list: urbs queued to this endpoint; maintained by usbcore * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH) - * with one or more transfer descriptors (TDs) per urb + * with one or more transfer descriptors (TDs) per urb; must be preserved + * by core while BW is allocated for the endpoint * @ep_dev: ep_device for sysfs info * @extra: descriptors following this endpoint in the configuration * @extralen: how many bytes of "extra" are valid @@ -2081,6 +2082,8 @@ static inline int usb_translate_errors(int error_code) case -ENODEV: case -EOPNOTSUPP: return error_code; + case -ENOSPC: + return -EBUSY; default: return -EIO; } diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 4ac082a63173..97ef37a1ff4a 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -118,8 +118,8 @@ struct cdc_ncm_ctx { u32 timer_interval; u32 max_ndp_size; - u8 is_ndp16; - u8 filtering_supported; + bool is_ndp16; + bool filtering_supported; 
union { struct usb_cdc_ncm_ndp16 *delayed_ndp16; struct usb_cdc_ncm_ndp32 *delayed_ndp32; diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index 6ccd1b2af993..337a5485af7c 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -34,7 +34,8 @@ enum pd_ctrl_msg_type { PD_CTRL_FR_SWAP = 19, PD_CTRL_GET_PPS_STATUS = 20, PD_CTRL_GET_COUNTRY_CODES = 21, - /* 22-23 Reserved */ + PD_CTRL_GET_SINK_CAP_EXT = 22, + /* 23 Reserved */ PD_CTRL_GET_REVISION = 24, /* 25-31 Reserved */ }; @@ -72,7 +73,8 @@ enum pd_ext_msg_type { PD_EXT_PPS_STATUS = 12, PD_EXT_COUNTRY_INFO = 13, PD_EXT_COUNTRY_CODES = 14, - /* 15-31 Reserved */ + PD_EXT_SINK_CAP_EXT = 15, + /* 16-31 Reserved */ }; #define PD_REV10 0x0 @@ -205,6 +207,72 @@ struct pd_message { }; } __packed; +/* + * count_chunked_data_objs - Helper to calculate number of Data Objects on a 4 + * byte boundary. + * @size: Size of data block for extended message. Should *not* include extended + * header size. + */ +static inline u8 count_chunked_data_objs(u32 size) +{ + size += offsetof(struct pd_chunked_ext_message_data, data); + return ((size / 4) + (size % 4 ? 1 : 0)); +} + +/* Sink Caps Extended Data Block Version */ +#define SKEDB_VER_1_0 1 + +/* Sink Caps Extended Sink Modes */ +#define SINK_MODE_PPS BIT(0) +#define SINK_MODE_VBUS BIT(1) +#define SINK_MODE_AC_SUPPLY BIT(2) +#define SINK_MODE_BATT BIT(3) +#define SINK_MODE_BATT_UL BIT(4) /* Unlimited battery power supply */ +#define SINK_MODE_AVS BIT(5) + +/** + * struct sink_caps_ext_msg - Sink extended capability PD message + * @vid: Vendor ID + * @pid: Product ID + * @xid: Value assigned by USB-IF for product + * @fw: Firmware version + * @hw: Hardware version + * @skedb_ver: Sink Caps Extended Data Block (SKEDB) Version + * @load_step: Indicates the load step slew rate. 
+ @load_char: Sink overload characteristics + @compliance: Types of sources the sink has been tested & certified on + @touch_temp: Indicates the IEC standard to which the touch temperature + conforms (if applicable). + @batt_info: Indicates number of batteries and hot swappable ports + @modes: Charging caps & power sources supported + @spr_min_pdp: Sink Minimum PDP for SPR mode + @spr_op_pdp: Sink Operational PDP for SPR mode + @spr_max_pdp: Sink Maximum PDP for SPR mode + @epr_min_pdp: Sink Minimum PDP for EPR mode + @epr_op_pdp: Sink Operational PDP for EPR mode + @epr_max_pdp: Sink Maximum PDP for EPR mode + */ +struct sink_caps_ext_msg { + __le16 vid; + __le16 pid; + __le32 xid; + u8 fw; + u8 hw; + u8 skedb_ver; + u8 load_step; + __le16 load_char; + u8 compliance; + u8 touch_temp; + u8 batt_info; + u8 modes; + u8 spr_min_pdp; + u8 spr_op_pdp; + u8 spr_max_pdp; + u8 epr_min_pdp; + u8 epr_op_pdp; + u8 epr_max_pdp; +} __packed; + /* PDO: Power Data Object */ #define PDO_MAX_OBJECTS 7 @@ -329,6 +397,32 @@ enum pd_apdo_type { #define PDO_SPR_AVS_APDO_9V_TO_15V_MAX_CURR GENMASK(19, 10) /* 10mA unit */ #define PDO_SPR_AVS_APDO_15V_TO_20V_MAX_CURR GENMASK(9, 0) /* 10mA unit */ +/* SPR AVS has two different current ranges 9V - 15V, 15V - 20V */ +#define SPR_AVS_TIER1_MIN_VOLT_MV 9000 +#define SPR_AVS_TIER1_MAX_VOLT_MV 15000 +#define SPR_AVS_TIER2_MAX_VOLT_MV 20000 + +#define SPR_AVS_AVS_SMALL_STEP_V 1 +/* vAvsStep - 100mv */ +#define SPR_AVS_VOLT_MV_STEP 100 +/* SPR AVS RDO Operating Current is in 50mA step */ +#define RDO_SPR_AVS_CURR_MA_STEP 50 +/* SPR AVS RDO Output voltage is in 25mV step */ +#define RDO_SPR_AVS_OUT_VOLT_MV_STEP 25 + +#define RDO_SPR_AVS_VOLT GENMASK(20, 9) +#define RDO_SPR_AVS_CURR GENMASK(6, 0) + +#define RDO_SPR_AVS_OUT_VOLT(mv) \ + FIELD_PREP(RDO_SPR_AVS_VOLT, ((mv) / RDO_SPR_AVS_OUT_VOLT_MV_STEP)) + +#define RDO_SPR_AVS_OP_CURR(ma) \ + FIELD_PREP(RDO_SPR_AVS_CURR, ((ma) / RDO_SPR_AVS_CURR_MA_STEP)) + +#define 
RDO_AVS(idx, out_mv, op_ma, flags) \ + (RDO_OBJ(idx) | (flags) | \ + RDO_SPR_AVS_OUT_VOLT(out_mv) | RDO_SPR_AVS_OP_CURR(op_ma)) + static inline enum pd_pdo_type pdo_type(u32 pdo) { return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK; @@ -339,6 +433,11 @@ static inline unsigned int pdo_fixed_voltage(u32 pdo) return ((pdo >> PDO_FIXED_VOLT_SHIFT) & PDO_VOLT_MASK) * 50; } +static inline unsigned int pdo_fixed_current(u32 pdo) +{ + return ((pdo >> PDO_FIXED_CURR_SHIFT) & PDO_CURR_MASK) * 10; +} + static inline unsigned int pdo_min_voltage(u32 pdo) { return ((pdo >> PDO_VAR_MIN_VOLT_SHIFT) & PDO_VOLT_MASK) * 50; @@ -582,6 +681,11 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_P_SNK_STDBY_MW 2500 /* 2500 mW */ +#define PD_I_SNK_STBY_MA 500 /* 500 mA */ + +#define PD_T_AVS_SRC_TRANS_SMALL 50 /* 50 ms */ +#define PD_T_AVS_SRC_TRANS_LARGE 700 /* 700 ms */ + #if IS_ENABLED(CONFIG_TYPEC) struct usb_power_delivery; diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index b22e659f81ba..93079450bba0 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -31,7 +31,7 @@ enum typec_cc_polarity { /* Time to wait for TCPC to complete transmit */ #define PD_T_TCPC_TX_TIMEOUT 100 /* in ms */ #define PD_ROLE_SWAP_TIMEOUT (MSEC_PER_SEC * 10) -#define PD_PPS_CTRL_TIMEOUT (MSEC_PER_SEC * 10) +#define PD_AUG_PSY_CTRL_TIMEOUT (MSEC_PER_SEC * 10) enum tcpm_transmit_status { TCPC_TX_SUCCESS = 0, diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h index 0513d333b797..b90cc5cfff8d 100644 --- a/include/linux/usb/typec_altmode.h +++ b/include/linux/usb/typec_altmode.h @@ -26,6 +26,9 @@ struct typec_altmode_ops; * @mode: Index of the Mode * @vdo: VDO returned by Discover Modes USB PD command * @active: Tells has the mode been entered or not + * @priority: Priority used by the automatic alternate mode selection process + * @mode_selection: Whether entry to this alternate mode is managed by the + * automatic alternate mode 
selection process or by the specific driver * @desc: Optional human readable description of the mode * @ops: Operations vector from the driver * @cable_ops: Cable operations vector from the driver. diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index d83e349900a3..d2920f98ab86 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -83,6 +83,39 @@ struct userfaultfd_ctx { extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason); +/* VMA userfaultfd operations */ +struct vm_uffd_ops { + /* Checks if a VMA can support userfaultfd */ + bool (*can_userfault)(struct vm_area_struct *vma, vm_flags_t vm_flags); + /* + * Called to resolve UFFDIO_CONTINUE request. + * Should return the folio found at pgoff in the VMA's pagecache if it + * exists or ERR_PTR otherwise. + * The returned folio is locked and with reference held. + */ + struct folio *(*get_folio_noalloc)(struct inode *inode, pgoff_t pgoff); + /* + * Called during resolution of UFFDIO_COPY request. + * Should allocate and return a folio or NULL if allocation fails. + */ + struct folio *(*alloc_folio)(struct vm_area_struct *vma, + unsigned long addr); + /* + * Called during resolution of UFFDIO_COPY request. + * Should only be called with a folio returned by alloc_folio() above. + * The folio will be set to locked. + * Returns 0 on success, error code on failure. + */ + int (*filemap_add)(struct folio *folio, struct vm_area_struct *vma, + unsigned long addr); + /* + * Called during resolution of UFFDIO_COPY request on the error + * handling path. + * Should revert the operation of ->filemap_add(). + */ + void (*filemap_remove)(struct folio *folio, struct vm_area_struct *vma); +}; + /* A combined operation mode + behavior flags. */ typedef unsigned int __bitwise uffd_flags_t; @@ -114,11 +147,6 @@ static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_at /* Flags controlling behavior. 
These behavior changes are mode-independent. */ #define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0) -extern int mfill_atomic_install_pte(pmd_t *dst_pmd, - struct vm_area_struct *dst_vma, - unsigned long dst_addr, struct page *page, - bool newly_allocated, uffd_flags_t flags); - extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start, unsigned long src_start, unsigned long len, uffd_flags_t flags); @@ -211,39 +239,8 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) return vma->vm_flags & __VM_UFFD_FLAGS; } -static inline bool vma_can_userfault(struct vm_area_struct *vma, - vm_flags_t vm_flags, - bool wp_async) -{ - vm_flags &= __VM_UFFD_FLAGS; - - if (vma->vm_flags & VM_DROPPABLE) - return false; - - if ((vm_flags & VM_UFFD_MINOR) && - (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))) - return false; - - /* - * If wp async enabled, and WP is the only mode enabled, allow any - * memory type. - */ - if (wp_async && (vm_flags == VM_UFFD_WP)) - return true; - - /* - * If user requested uffd-wp but not enabled pte markers for - * uffd-wp, then shmem & hugetlbfs are not supported but only - * anonymous. 
- */ - if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) && - !vma_is_anonymous(vma)) - return false; - - /* By default, allow any of anon|shmem|hugetlb */ - return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || - vma_is_shmem(vma); -} +bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags, + bool wp_async); static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma) { diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h deleted file mode 100644 index ea722479510c..000000000000 --- a/include/linux/virtio_caif.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (C) ST-Ericsson AB 2012 - * Author: Sjur Brændeland <sjur.brandeland@stericsson.com> - * - * This header is BSD licensed so - * anyone can use the definitions to implement compatible remote processors - */ - -#ifndef VIRTIO_CAIF_H -#define VIRTIO_CAIF_H - -#include <linux/types.h> -struct virtio_caif_transf_config { - __virtio16 headroom; - __virtio16 tailroom; - __virtio32 mtu; - u8 reserved[4]; -}; - -struct virtio_caif_config { - struct virtio_caif_transf_config uplink, downlink; - u8 reserved[8]; -}; -#endif diff --git a/include/linux/wmi.h b/include/linux/wmi.h index 14fb644e1701..d723e4b1cafb 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -64,9 +64,13 @@ ssize_t wmi_string_from_utf8s(struct wmi_string *str, size_t max_chars, const u8 size_t src_length); int wmidev_invoke_method(struct wmi_device *wdev, u8 instance, u32 method_id, - const struct wmi_buffer *in, struct wmi_buffer *out); + const struct wmi_buffer *in, struct wmi_buffer *out, size_t min_size); -int wmidev_query_block(struct wmi_device *wdev, u8 instance, struct wmi_buffer *out); +int wmidev_invoke_procedure(struct wmi_device *wdev, u8 instance, u32 method_id, + const struct wmi_buffer *in); + +int wmidev_query_block(struct wmi_device *wdev, u8 instance, struct wmi_buffer *out, + size_t min_size); int wmidev_set_block(struct wmi_device *wdev, u8 instance, const 
struct wmi_buffer *in); @@ -83,7 +87,7 @@ u8 wmidev_instance_count(struct wmi_device *wdev); * struct wmi_driver - WMI driver structure * @driver: Driver model structure * @id_table: List of WMI GUIDs supported by this driver - * @no_notify_data: Driver supports WMI events which provide no event data + * @min_event_size: Minimum event payload size supported by this driver * @no_singleton: Driver can be instantiated multiple times * @probe: Callback for device binding * @remove: Callback for device unbinding @@ -93,11 +97,14 @@ u8 wmidev_instance_count(struct wmi_device *wdev); * * This represents WMI drivers which handle WMI devices. The data inside the buffer * passed to the @notify_new callback is guaranteed to be aligned on a 8-byte boundary. + * The minimum supported size for said buffer can be specified using @min_event_size. + * WMI drivers that still use the deprecated @notify callback can still set @min_event_size + * to 0 in order to signal that they support WMI events which provide no event data. */ struct wmi_driver { struct device_driver driver; const struct wmi_device_id *id_table; - bool no_notify_data; + size_t min_event_size; bool no_singleton; int (*probe)(struct wmi_device *wdev, const void *context); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index ab6cb70ca1a5..6177624539b3 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -534,8 +534,10 @@ alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...) * Pointer to the allocated workqueue on success, %NULL on failure. */ __printf(2, 5) struct workqueue_struct * -devm_alloc_workqueue(struct device *dev, const char *fmt, unsigned int flags, - int max_active, ...); +devm_alloc_workqueue_noprof(struct device *dev, const char *fmt, + unsigned int flags, int max_active, ...); +#define devm_alloc_workqueue(...) 
\ + alloc_hooks(devm_alloc_workqueue_noprof(__VA_ARGS__)) #ifdef CONFIG_LOCKDEP /** diff --git a/include/linux/yam.h b/include/linux/yam.h deleted file mode 100644 index a29b04fa1e66..000000000000 --- a/include/linux/yam.h +++ /dev/null @@ -1,67 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/*****************************************************************************/ - -/* - * yam.h -- YAM radio modem driver. - * - * Copyright (C) 1998 Frederic Rible F1OAT (frible@teaser.fr) - * Adapted from baycom.c driver written by Thomas Sailer (sailer@ife.ee.ethz.ch) - * - * Please note that the GPL allows you to use the driver, NOT the radio. - * In order to use the radio, you need a license from the communications - * authority of your country. - */ - -/*****************************************************************************/ - -#define SIOCYAMRESERVED (0) -#define SIOCYAMSCFG (1) /* Set configuration */ -#define SIOCYAMGCFG (2) /* Get configuration */ -#define SIOCYAMSMCS (3) /* Set mcs data */ - -#define YAM_IOBASE (1 << 0) -#define YAM_IRQ (1 << 1) -#define YAM_BITRATE (1 << 2) /* Bit rate of radio port ->57600 */ -#define YAM_MODE (1 << 3) /* 0=simplex 1=duplex 2=duplex+tempo */ -#define YAM_HOLDDLY (1 << 4) /* duplex tempo (sec) */ -#define YAM_TXDELAY (1 << 5) /* Tx Delay (ms) */ -#define YAM_TXTAIL (1 << 6) /* Tx Tail (ms) */ -#define YAM_PERSIST (1 << 7) /* Persist (ms) */ -#define YAM_SLOTTIME (1 << 8) /* Slottime (ms) */ -#define YAM_BAUDRATE (1 << 9) /* Baud rate of rs232 port ->115200 */ - -#define YAM_MAXBITRATE 57600 -#define YAM_MAXBAUDRATE 115200 -#define YAM_MAXMODE 2 -#define YAM_MAXHOLDDLY 99 -#define YAM_MAXTXDELAY 999 -#define YAM_MAXTXTAIL 999 -#define YAM_MAXPERSIST 255 -#define YAM_MAXSLOTTIME 999 - -#define YAM_FPGA_SIZE 5302 - -struct yamcfg { - unsigned int mask; /* Mask of commands */ - unsigned int iobase; /* IO Base of COM port */ - unsigned int irq; /* IRQ of COM port */ - unsigned int bitrate; /* Bit rate of radio port */ - 
unsigned int baudrate; /* Baud rate of the RS232 port */ - unsigned int txdelay; /* TxDelay */ - unsigned int txtail; /* TxTail */ - unsigned int persist; /* Persistence */ - unsigned int slottime; /* Slottime */ - unsigned int mode; /* mode 0 (simp), 1(Dupl), 2(Dupl+delay) */ - unsigned int holddly; /* PTT delay in FullDuplex 2 mode */ -}; - -struct yamdrv_ioctl_cfg { - int cmd; - struct yamcfg cfg; -}; - -struct yamdrv_ioctl_mcs { - int cmd; - unsigned int bitrate; - unsigned char bits[YAM_FPGA_SIZE]; -}; |
