Diffstat (limited to 'include/linux')
35 files changed, 162 insertions, 115 deletions
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 26f0ecf401ea..0bbfd647f5c6 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -65,6 +65,7 @@ struct amba_device { struct device dev; struct resource res; struct clk *pclk; + struct device_dma_parameters dma_parms; unsigned int periphid; unsigned int cid; struct amba_cs_uci_id uci; diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index ee577a83cfe6..7367150f962a 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -219,6 +219,7 @@ struct backing_dev_info { wait_queue_head_t wb_waitq; struct device *dev; + char dev_name[64]; struct device *owner; struct timer_list laptop_mode_wb_timer; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f88197c1ffc2..c9ad5c3b7b4b 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) (1 << WB_async_congested)); } -extern const char *bdi_unknown_name; - -static inline const char *bdi_dev_name(struct backing_dev_info *bdi) -{ - if (!bdi || !bdi->dev) - return bdi_unknown_name; - return dev_name(bdi->dev); -} +const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 6462c5447872..f4b77018c625 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -245,6 +245,7 @@ #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) #define BCM54810_SHD_CLK_CTL 0x3 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) +#define BCM54810_SHD_SCR3_TRDDAPD 0x0100 /* BCM54612E Registers */ #define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 034b0a644efc..448c91bf543b 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -356,4 +356,10 @@ static inline void *offset_to_ptr(const int *off) /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +/* + * This is needed in functions which generate the stack canary, see + * arch/x86/kernel/smpboot.c::start_secondary() for an example. + */ +#define prevent_tail_call_optimization() mb() + #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/cper.h b/include/linux/cper.h index 4f005d95ce88..8537e9282a65 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -521,6 +521,15 @@ struct cper_sec_pcie { u8 aer_info[96]; }; +/* Firmware Error Record Reference, UEFI v2.7 sec N.2.10 */ +struct cper_sec_fw_err_rec_ref { + u8 record_type; + u8 revision; + u8 reserved[6]; + u64 record_identifier; + guid_t record_identifier_guid; +}; + /* Reset to default packing */ #pragma pack() diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 1ade486fc2bb..57bcef6f988a 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -329,13 +329,12 @@ struct dma_buf { /** * struct dma_buf_attach_ops - importer operations for an attachment - * @move_notify: [optional] notification that the DMA-buf is moving * * Attachment operations implemented by the importer. */ struct dma_buf_attach_ops { /** - * @move_notify + * @move_notify: [optional] notification that the DMA-buf is moving * * If this callback is provided the framework can avoid pinning the * backing store while mappings exists. 
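Editor's note on the dma_parms additions above: embedding struct device_dma_parameters in struct amba_device (and, further down, in struct platform_device) gives the bus core per-device storage to point dev->dma_parms at, so drivers can set DMA segment limits without a separate allocation. A minimal sketch of the pattern this enables follows; the two functions are hypothetical, only dma_set_max_seg_size() and the structure members come from the headers.

    #include <linux/amba/bus.h>
    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    /* Assumed to be done once by the bus core when the device is set up. */
    static void example_wire_dma_parms(struct amba_device *adev)
    {
            adev->dev.dma_parms = &adev->dma_parms;
    }

    /*
     * A driver can then restrict DMA segment size without allocating anything;
     * dma_set_max_seg_size() returns an error when dev->dma_parms is still
     * NULL, which is exactly what the embedded member avoids.
     */
    static int example_driver_probe(struct amba_device *adev,
                                    const struct amba_id *id)
    {
            return dma_set_max_seg_size(&adev->dev, SZ_64K);
    }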
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 21065c04c4ac..e1c03339918f 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -83,9 +83,9 @@ enum dma_transfer_direction { /** * Interleaved Transfer Request * ---------------------------- - * A chunk is collection of contiguous bytes to be transfered. + * A chunk is collection of contiguous bytes to be transferred. * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG). - * ICGs may or maynot change between chunks. + * ICGs may or may not change between chunks. * A FRAME is the smallest series of contiguous {chunk,icg} pairs, * that when repeated an integral number of times, specifies the transfer. * A transfer template is specification of a Frame, the number of times @@ -341,13 +341,11 @@ struct dma_chan { * @chan: driver channel device * @device: sysfs device * @dev_id: parent dma_device dev_id - * @idr_ref: reference count to gate release of dma_device dev_id */ struct dma_chan_dev { struct dma_chan *chan; struct device device; int dev_id; - atomic_t *idr_ref; }; /** @@ -835,6 +833,8 @@ struct dma_device { int dev_id; struct device *dev; struct module *owner; + struct ida chan_ida; + struct mutex chan_mutex; /* to protect chan_ida */ u32 src_addr_widths; u32 dst_addr_widths; @@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) * dmaengine_synchronize() needs to be called before it is safe to free * any memory that is accessed by previously submitted descriptors or before * freeing any resources accessed from within the completion callback of any - * perviously submitted descriptors. + * previously submitted descriptors. * * This function can be called from atomic context as well as from within a * complete callback of a descriptor submitted on the same channel. @@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan) * * Synchronizes to the DMA channel termination to the current context. When this * function returns it is guaranteed that all transfers for previously issued - * descriptors have stopped and and it is safe to free the memory assoicated + * descriptors have stopped and it is safe to free the memory associated * with them. Furthermore it is guaranteed that all complete callback functions * for a previously submitted descriptor have finished running and it is safe to * free resources accessed from within the complete callbacks. diff --git a/include/linux/efi.h b/include/linux/efi.h index 251f1f783cdf..9430d01c0c3d 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1245,4 +1245,6 @@ struct linux_efi_memreserve { void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size); +char *efi_systab_show_arch(char *str); + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 4f6f59b4f22a..45cc10cdf6dd 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -983,7 +983,7 @@ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ - unsigned char f_handle[0]; + unsigned char f_handle[]; }; static inline struct file *get_file(struct file *f) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index db95244a62d4..ab4bd15cbcdb 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -210,6 +210,29 @@ struct ftrace_ops { #endif }; +extern struct ftrace_ops __rcu *ftrace_ops_list; +extern struct ftrace_ops ftrace_list_end; + +/* + * Traverse the ftrace_global_list, invoking all entries. 
The reason that we + * can use rcu_dereference_raw_check() is that elements removed from this list + * are simply leaked, so there is no need to interact with a grace-period + * mechanism. The rcu_dereference_raw_check() calls are needed to handle + * concurrent insertions into the ftrace_global_list. + * + * Silly Alpha and silly pointer-speculation compiler optimizations! + */ +#define do_for_each_ftrace_op(op, list) \ + op = rcu_dereference_raw_check(list); \ + do + +/* + * Optimized for just a single item in the list (as that is the normal case). + */ +#define while_for_each_ftrace_op(op) \ + while (likely(op = rcu_dereference_raw_check((op)->next)) && \ + unlikely((op) != &ftrace_list_end)) + /* * Type of the current tracing. */ diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 62d216ff1097..c230b4e70d75 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -17,9 +17,12 @@ enum host1x_class { HOST1X_CLASS_GR3D = 0x60, }; +struct host1x; struct host1x_client; struct iommu_group; +u64 host1x_get_dma_mask(struct host1x *host1x); + /** * struct host1x_client_ops - host1x client operations * @init: host1x client initialization code diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h index c5a977320f82..98ef73b7c8fd 100644 --- a/include/linux/i2c-mux.h +++ b/include/linux/i2c-mux.h @@ -29,7 +29,7 @@ struct i2c_mux_core { int num_adapters; int max_adapters; - struct i2c_adapter *adapter[0]; + struct i2c_adapter *adapter[]; }; struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 45d36ba4826b..49d29054e657 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -2,7 +2,7 @@ /* * i2c.h - definitions for the Linux i2c bus interface * Copyright (C) 1995-2000 Simon G. 
Vogl - * Copyright (C) 2013-2019 Wolfram Sang <wsa@the-dreams.de> + * Copyright (C) 2013-2019 Wolfram Sang <wsa@kernel.org> * * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and * Frodo Looijaard <frodol@dds.nl> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 01276e3d01b9..131cc1527d68 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -813,8 +813,11 @@ void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, + struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); +bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, + struct kvm_vcpu *except); bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, unsigned long *vcpu_bitmap); diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 9cd4455528e5..5616b2567aa7 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -55,7 +55,7 @@ LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, struct fs_context *src_sc) -LSM_HOOK(int, 0, fs_context_parse_param, struct fs_context *fc, +LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, struct fs_parameter *param) LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) @@ -243,7 +243,7 @@ LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name, char **value) LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) LSM_HOOK(int, 0, ismaclabel, const char *name) -LSM_HOOK(int, 0, secid_to_secctx, u32 secid, char **secdata, +LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata, u32 *seclen) LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index d275c72c4f8e..977edd3b7bd8 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -783,6 +783,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg, atomic_long_inc(&memcg->memory_events[event]); cgroup_file_notify(&memcg->events_file); + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) + break; if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) break; } while ((memcg = parent_mem_cgroup(memcg)) && diff --git a/include/linux/mhi.h b/include/linux/mhi.h index ad1996001965..3d7c3c26eeb9 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -53,9 +53,9 @@ enum mhi_callback { * @MHI_CHAIN: Linked transfer */ enum mhi_flags { - MHI_EOB, - MHI_EOT, - MHI_CHAIN, + MHI_EOB = BIT(0), + MHI_EOT = BIT(1), + MHI_CHAIN = BIT(2), }; /** @@ -335,14 +335,15 @@ struct mhi_controller_config { * @syserr_worker: System error worker * @state_event: State change event * @status_cb: CB function to notify power states of the device (required) - * @link_status: CB function to query link status of the device (required) * @wake_get: CB function to assert device wake (optional) * @wake_put: CB function to de-assert device wake (optional) * @wake_toggle: CB function to assert and de-assert device wake (optional) * @runtime_get: CB function to controller 
runtime resume (required) - * @runtimet_put: CB function to decrement pm usage (required) + * @runtime_put: CB function to decrement pm usage (required) * @map_single: CB function to create TRE buffer * @unmap_single: CB function to destroy TRE buffer + * @read_reg: Read a MHI register via the physical link (required) + * @write_reg: Write a MHI register via the physical link (required) * @buffer_len: Bounce buffer length * @bounce_buf: Use of bounce buffer * @fbc_download: MHI host needs to do complete image transfer (optional) @@ -417,7 +418,6 @@ struct mhi_controller { void (*status_cb)(struct mhi_controller *mhi_cntrl, enum mhi_callback cb); - int (*link_status)(struct mhi_controller *mhi_cntrl); void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); void (*wake_toggle)(struct mhi_controller *mhi_cntrl); @@ -427,6 +427,10 @@ struct mhi_controller { struct mhi_buf_info *buf); void (*unmap_single)(struct mhi_controller *mhi_cntrl, struct mhi_buf_info *buf); + int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, + u32 *out); + void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, + u32 val); size_t buffer_len; bool bounce_buf; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6f8f79ef829b..8397b6558dc7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -213,6 +213,12 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; +enum mlx5_cmdif_state { + MLX5_CMDIF_STATE_UNINITIALIZED, + MLX5_CMDIF_STATE_UP, + MLX5_CMDIF_STATE_DOWN, +}; + struct mlx5_cmd_first { __be32 data[4]; }; @@ -258,6 +264,7 @@ struct mlx5_cmd_stats { struct mlx5_cmd { struct mlx5_nb nb; + enum mlx5_cmdif_state state; void *cmd_alloc_buf; dma_addr_t alloc_dma; int alloc_size; @@ -284,6 +291,7 @@ struct mlx5_cmd { struct semaphore sem; struct semaphore pages_sem; int mode; + u16 allowed_opcode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; struct dma_pool *pool; struct mlx5_cmd_debug dbg; @@ -743,6 +751,7 @@ struct mlx5_cmd_work_ent { struct delayed_work cb_timeout_work; void *context; int idx; + struct completion handling; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; @@ -874,10 +883,17 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); } +enum { + CMD_ALLOWED_OPCODE_ALL, +}; + int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +void mlx5_cmd_set_state(struct mlx5_core_dev *dev, + enum mlx5_cmdif_state cmdif_state); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); +void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); struct mlx5_async_ctx { struct mlx5_core_dev *dev; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 440230488025..e5f3e7d8d3d5 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1317,11 +1317,13 @@ struct nfs41_impl_id { struct nfstime4 date; }; +#define MAX_BIND_CONN_TO_SESSION_RETRIES 3 struct nfs41_bind_conn_to_session_args { struct nfs_client *client; struct nfs4_sessionid sessionid; u32 dir; bool use_conn_in_rdma_mode; + int retries; }; struct nfs41_bind_conn_to_session_res { diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h index c588be843f61..0ecce6aa69d5 100644 --- 
a/include/linux/platform_data/cros_ec_sensorhub.h +++ b/include/linux/platform_data/cros_ec_sensorhub.h @@ -185,6 +185,7 @@ int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub, void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub, u8 sensor_num); +int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub); int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub); void cros_ec_sensorhub_ring_remove(void *arg); int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub, diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index bdc35753ef7c..77a2aada106d 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -25,6 +25,7 @@ struct platform_device { bool id_auto; struct device dev; u64 platform_dma_mask; + struct device_dma_parameters dma_parms; u32 num_resources; struct resource *resource; diff --git a/include/linux/pm.h b/include/linux/pm.h index e057d1fa2469..121c104a4090 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -544,31 +544,17 @@ struct pm_subsys_data { * These flags can be set by device drivers at the probe time. They need not be * cleared by the drivers as the driver core will take care of that. * - * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device. - * SMART_PREPARE: Check the return value of the driver's ->prepare callback. - * SMART_SUSPEND: No need to resume the device from runtime suspend. - * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible. - * - * Setting SMART_PREPARE instructs bus types and PM domains which may want - * system suspend/resume callbacks to be skipped for the device to return 0 from - * their ->prepare callbacks if the driver's ->prepare callback returns 0 (in - * other words, the system suspend/resume callbacks can only be skipped for the - * device if its driver doesn't object against that). This flag has no effect - * if NEVER_SKIP is set. - * - * Setting SMART_SUSPEND instructs bus types and PM domains which may want to - * runtime resume the device upfront during system suspend that doing so is not - * necessary from the driver's perspective. It also may cause them to skip - * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by - * the driver if they decide to leave the device in runtime suspend. - * - * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the - * driver prefers the device to be left in suspend after system resume. + * NO_DIRECT_COMPLETE: Do not apply direct-complete optimization to the device. + * SMART_PREPARE: Take the driver ->prepare callback return value into account. + * SMART_SUSPEND: Avoid resuming the device from runtime suspend. + * MAY_SKIP_RESUME: Allow driver "noirq" and "early" callbacks to be skipped. + * + * See Documentation/driver-api/pm/devices.rst for details. 
*/ -#define DPM_FLAG_NEVER_SKIP BIT(0) +#define DPM_FLAG_NO_DIRECT_COMPLETE BIT(0) #define DPM_FLAG_SMART_PREPARE BIT(1) #define DPM_FLAG_SMART_SUSPEND BIT(2) -#define DPM_FLAG_LEAVE_SUSPENDED BIT(3) +#define DPM_FLAG_MAY_SKIP_RESUME BIT(3) struct dev_pm_info { pm_message_t power_state; @@ -758,8 +744,8 @@ extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); -extern bool dev_pm_may_skip_resume(struct device *dev); -extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); +extern bool dev_pm_skip_resume(struct device *dev); +extern bool dev_pm_skip_suspend(struct device *dev); #else /* !CONFIG_PM_SLEEP */ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 3bdcbce8141a..3dbc207bff53 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -102,9 +102,9 @@ static inline bool pm_runtime_enabled(struct device *dev) return !dev->power.disable_depth; } -static inline bool pm_runtime_callbacks_present(struct device *dev) +static inline bool pm_runtime_has_no_callbacks(struct device *dev) { - return !dev->power.no_callbacks; + return dev->power.no_callbacks; } static inline void pm_runtime_mark_last_busy(struct device *dev) diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 121a7eda4593..c602670bbffb 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -105,10 +105,10 @@ struct ptp_system_timestamp { * parameter func: the desired function to use. * parameter chan: the function channel index to use. * - * @do_work: Request driver to perform auxiliary (periodic) operations - * Driver should return delay of the next auxiliary work scheduling - * time (>=0) or negative value in case further scheduling - * is not required. + * @do_aux_work: Request driver to perform auxiliary (periodic) operations + * Driver should return delay of the next auxiliary work + * scheduling time (>=0) or negative value in case further + * scheduling is not required. * * Drivers should embed their ptp_clock_info within a private * structure, obtaining a reference to it using container_of(). 
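Editor's note on the renamed driver flags above: a minimal sketch of how a driver would opt in under the new spellings (the probe function is hypothetical; dev_pm_set_driver_flags() is the existing helper for setting these bits).

    #include <linux/device.h>
    #include <linux/pm.h>

    static int example_probe(struct device *dev)
    {
            /*
             * Formerly DPM_FLAG_SMART_SUSPEND | DPM_FLAG_LEAVE_SUSPENDED;
             * only the names change here, the bit values stay BIT(2)/BIT(3).
             */
            dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
                                         DPM_FLAG_MAY_SKIP_RESUME);
            return 0;
    }

Note also that the pm_runtime.h hunk inverts the helper's sense: pm_runtime_has_no_callbacks() returns true when no runtime PM callbacks are present, the opposite polarity of the old pm_runtime_callbacks_present().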
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 8a709f63c5e5..ad31c9fb7158 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -187,6 +187,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, dst->sg.data[which] = src->sg.data[which]; dst->sg.data[which].length = size; dst->sg.size += size; + src->sg.size -= size; src->sg.data[which].length -= size; src->sg.data[which].offset += size; } diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index ca7e108248e2..02e7a5863d28 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -71,7 +71,13 @@ struct rpc_clnt { #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *cl_debugfs; /* debugfs directory */ #endif - struct rpc_xprt_iter cl_xpi; + /* cl_work is only needed after cl_xpi is no longer used, + * and that are of similar size + */ + union { + struct rpc_xprt_iter cl_xpi; + struct work_struct cl_work; + }; const struct cred *cl_cred; }; @@ -236,4 +242,9 @@ static inline int rpc_reply_expected(struct rpc_task *task) (task->tk_msg.rpc_proc->p_decode != NULL); } +static inline void rpc_task_close_connection(struct rpc_task *task) +{ + if (task->tk_xprt) + xprt_force_disconnect(task->tk_xprt); +} #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 48c1b1674cbf..bc07e51f20d1 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -21,6 +21,7 @@ struct gss_ctx { struct gss_api_mech *mech_type; void *internal_ctx_id; + unsigned int slack, align; }; #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0) @@ -66,6 +67,7 @@ u32 gss_wrap( u32 gss_unwrap( struct gss_ctx *ctx_id, int offset, + int len, struct xdr_buf *inbuf); u32 gss_delete_sec_context( struct gss_ctx **ctx_id); @@ -126,6 +128,7 @@ struct gss_api_ops { u32 (*gss_unwrap)( struct gss_ctx *ctx_id, int offset, + int len, struct xdr_buf *buf); void (*gss_delete_sec_context)( void *internal_ctx_id); diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index c1d77dd8ed41..e8f8ffe7448b 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -83,7 +83,7 @@ struct gss_krb5_enctype { u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages); /* v2 encryption function */ - u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, + u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *headskip, u32 *tailskip); /* v2 decryption function */ }; @@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, struct xdr_buf *outbuf, struct page **pages); u32 -gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, +gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len, struct xdr_buf *buf); @@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, struct page **pages); u32 -gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *plainoffset, u32 *plainlen); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 01bb41908c93..22c207b2425f 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -184,6 +184,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned 
int, unsigned int); +extern void xdr_buf_trim(struct xdr_buf *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 4fcc6fd0cbd6..b960098acfb0 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -466,6 +466,12 @@ static inline bool system_entering_hibernation(void) { return false; } static inline bool hibernation_available(void) { return false; } #endif /* CONFIG_HIBERNATION */ +#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV +int is_hibernate_resume_dev(const struct inode *); +#else +static inline int is_hibernate_resume_dev(const struct inode *i) { return 0; } +#endif + /* Hibernation and suspend events */ #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ #define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 421c99c12291..4f8159e90ce1 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -78,47 +78,6 @@ struct tcp_sack_block { #define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ #define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ -#if IS_ENABLED(CONFIG_MPTCP) -struct mptcp_options_received { - u64 sndr_key; - u64 rcvr_key; - u64 data_ack; - u64 data_seq; - u32 subflow_seq; - u16 data_len; - u16 mp_capable : 1, - mp_join : 1, - dss : 1, - add_addr : 1, - rm_addr : 1, - family : 4, - echo : 1, - backup : 1; - u32 token; - u32 nonce; - u64 thmac; - u8 hmac[20]; - u8 join_id; - u8 use_map:1, - dsn64:1, - data_fin:1, - use_ack:1, - ack64:1, - mpc_map:1, - __unused:2; - u8 addr_id; - u8 rm_id; - union { - struct in_addr addr; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - struct in6_addr addr6; -#endif - }; - u64 ahmac; - u16 port; -}; -#endif - struct tcp_options_received { /* PAWS/RTTM data */ int ts_recent_stamp;/* Time we stored ts_recent (for aging) */ @@ -136,9 +95,6 @@ struct tcp_options_received { u8 num_sacks; /* Number of SACK blocks */ u16 user_mss; /* mss requested by user in ioctl */ u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ -#if IS_ENABLED(CONFIG_MPTCP) - struct mptcp_options_received mptcp; -#endif }; static inline void tcp_clear_options(struct tcp_options_received *rx_opt) @@ -148,13 +104,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt) #if IS_ENABLED(CONFIG_SMC) rx_opt->smc_ok = 0; #endif -#if IS_ENABLED(CONFIG_MPTCP) - rx_opt->mptcp.mp_capable = 0; - rx_opt->mptcp.mp_join = 0; - rx_opt->mptcp.add_addr = 0; - rx_opt->mptcp.rm_addr = 0; - rx_opt->mptcp.dss = 0; -#endif } /* This is the max number of SACKS that we'll generate and process. 
It's safe diff --git a/include/linux/tty.h b/include/linux/tty.h index bd5fe0e907e8..a99e9b8e4e31 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -66,7 +66,7 @@ struct tty_buffer { int read; int flags; /* Data points here */ - unsigned long data[0]; + unsigned long data[]; }; /* Values for .flags field of tty_buffer */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 0d1fe9297ac6..6f6ade63b04c 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -3,6 +3,8 @@ #define _LINUX_VIRTIO_NET_H #include <linux/if_vlan.h> +#include <uapi/linux/tcp.h> +#include <uapi/linux/udp.h> #include <uapi/linux/virtio_net.h> static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, @@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, bool little_endian) { unsigned int gso_type = 0; + unsigned int thlen = 0; + unsigned int ip_proto; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: gso_type = SKB_GSO_TCPV4; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; + ip_proto = IPPROTO_UDP; + thlen = sizeof(struct udphdr); break; default: return -EINVAL; @@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; + + if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) + return -EINVAL; } else { /* gso packets without NEEDS_CSUM do not set transport_offset. * probe and drop if does not match one of the above types. */ if (gso_type && skb->network_header) { + struct flow_keys_basic keys; + if (!skb->protocol) virtio_net_hdr_set_proto(skb, hdr); retry: - skb_probe_transport_header(skb); - if (!skb_transport_header_was_set(skb)) { + if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, + NULL, 0, 0, 0, + 0)) { /* UFO does not specify ipv4 or 6: try both */ if (gso_type & SKB_GSO_UDP && skb->protocol == htons(ETH_P_IP)) { @@ -75,6 +91,12 @@ retry: } return -EINVAL; } + + if (keys.control.thoff + thlen > skb_headlen(skb) || + keys.basic.ip_proto != ip_proto) + return -EINVAL; + + skb_set_transport_header(skb, keys.control.thoff); } } diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 71c81e0dc8f2..dc636b727179 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -48,6 +48,7 @@ struct virtio_vsock_pkt { u32 len; u32 off; bool reply; + bool tap_delivered; }; struct virtio_vsock_pkt_info { |
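Editor's note on the [0] -> [] changes in fs.h, i2c-mux.h, and tty.h above: these are part of the treewide conversion from zero-length arrays to C99 flexible array members. A minimal, illustrative sketch of the allocation pattern such members pair with (the function is hypothetical and the real tty buffer code sizes its allocations differently):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/tty.h>

    /* Allocate a tty_buffer with room for 'n' elements of its data[] tail. */
    static struct tty_buffer *example_tty_buffer_alloc(size_t n)
    {
            struct tty_buffer *buf;

            /*
             * struct_size() computes sizeof(*buf) + n * sizeof(buf->data[0])
             * with overflow checking, which is the main benefit of dropping
             * the old zero-length-array idiom.
             */
            buf = kzalloc(struct_size(buf, data, n), GFP_KERNEL);
            return buf;
    }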