Diffstat (limited to 'include/linux')
47 files changed, 2030 insertions, 142 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0f37a7d5fa77..0f24d701fbdc 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -279,6 +279,21 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) /* Validate the processor object's proc_id */ bool acpi_duplicate_processor_id(int proc_id); +/* Processor _CST control */ +struct acpi_processor_power; + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +bool acpi_processor_claim_cst_control(void); +int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, + struct acpi_processor_power *info); +#else +static inline bool acpi_processor_claim_cst_control(void) { return false; } +static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, + struct acpi_processor_power *info) +{ + return -ENODEV; +} +#endif #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 47eb22a3b7f9..4c636c42ad68 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -328,6 +328,7 @@ struct queue_limits { unsigned int max_sectors; unsigned int max_segment_size; unsigned int physical_block_size; + unsigned int logical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; @@ -338,7 +339,6 @@ struct queue_limits { unsigned int discard_granularity; unsigned int discard_alignment; - unsigned short logical_block_size; unsigned short max_segments; unsigned short max_integrity_segments; unsigned short max_discard_segments; @@ -1077,7 +1077,7 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors); extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors); -extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); @@ -1291,7 +1291,7 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned short queue_logical_block_size(const struct request_queue *q) +static inline unsigned queue_logical_block_size(const struct request_queue *q) { int retval = 512; @@ -1301,7 +1301,7 @@ static inline unsigned short queue_logical_block_size(const struct request_queue return retval; } -static inline unsigned short bdev_logical_block_size(struct block_device *bdev) +static inline unsigned int bdev_logical_block_size(struct block_device *bdev) { return queue_logical_block_size(bdev_get_queue(bdev)); }
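The blkdev.h hunks above widen logical_block_size from unsigned short to unsigned int, so a driver can advertise a logical block size of 64 KiB or larger without the value truncating. A minimal sketch of a driver using the widened setter; the queue pointer and the 4 KiB value are illustrative, not from this patch:

#include <linux/blkdev.h>

/* hypothetical helper: advertise 4 KiB logical/physical blocks */
static void example_set_block_limits(struct request_queue *q)
{
        /* the size argument is now an unsigned int, so values >= 64 KiB fit */
        blk_queue_logical_block_size(q, 4096);
        blk_queue_physical_block_size(q, 4096);
}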
diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 679a42253170..a81c13ac1972 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -153,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec, } } -/* - * Get the last single-page segment from the multi-page bvec and store it - * in @seg - */ -static inline void mp_bvec_last_segment(const struct bio_vec *bvec, - struct bio_vec *seg) -{ - unsigned total = bvec->bv_offset + bvec->bv_len; - unsigned last_page = (total - 1) / PAGE_SIZE; - - seg->bv_page = bvec->bv_page + last_page; - - /* the whole segment is inside the last page */ - if (bvec->bv_offset >= last_page * PAGE_SIZE) { - seg->bv_offset = bvec->bv_offset % PAGE_SIZE; - seg->bv_len = bvec->bv_len; - } else { - seg->bv_offset = 0; - seg->bv_len = total - last_page * PAGE_SIZE; - } -} - #endif /* __LINUX_BVEC_ITER_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 9b3c720a31b1..5e3d45525bd3 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -18,6 +18,7 @@ #include <linux/can/error.h> #include <linux/can/led.h> #include <linux/can/netlink.h> +#include <linux/can/skb.h> #include <linux/netdevice.h> /* @@ -91,6 +92,36 @@ struct can_priv { #define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) #define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) +/* Check for outgoing skbs that have not been created by the CAN subsystem */ +static inline bool can_skb_headroom_valid(struct net_device *dev, + struct sk_buff *skb) +{ + /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ + if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) + return false; + + /* af_packet does not apply CAN skb specific settings */ + if (skb->ip_summed == CHECKSUM_NONE) { + /* init headroom */ + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* perform proper loopback on capable devices */ + if (dev->flags & IFF_ECHO) + skb->pkt_type = PACKET_LOOPBACK; + else + skb->pkt_type = PACKET_HOST; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + } + + return true; +} + /* Drop a given socketbuffer if it does not contain a valid CAN frame. */ static inline bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) @@ -108,6 +139,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev, } else goto inval_skb; + if (!can_skb_headroom_valid(dev, skb)) + goto inval_skb; + return false; inval_skb:
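The can/dev.h change adds can_skb_headroom_valid() and calls it from can_dropped_invalid_skb(), so frames injected through af_packet are either fixed up with proper CAN skb settings or dropped. A sketch of the existing call pattern in a driver's xmit path; the function name is hypothetical:

#include <linux/can/dev.h>

static netdev_tx_t example_can_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
{
        /* now also validates/initializes the headroom of af_packet skbs */
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;

        /* ... hand the frame to the controller ... */
        return NETDEV_TX_OK;
}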
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 1dabe36bd011..ec2ef63771f0 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -77,6 +77,7 @@ struct cpuidle_state { #define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ #define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ #define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */ +#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ struct cpuidle_device_kobj; struct cpuidle_state_kobj; @@ -115,7 +116,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); struct cpuidle_driver { const char *name; struct module *owner; - int refcnt; /* used by the cpuidle framework to setup the broadcast timer */ unsigned int bctimer:1; @@ -147,8 +147,6 @@ extern u64 cpuidle_poll_time(struct cpuidle_driver *drv, extern int cpuidle_register_driver(struct cpuidle_driver *drv); extern struct cpuidle_driver *cpuidle_get_driver(void); -extern struct cpuidle_driver *cpuidle_driver_ref(void); -extern void cpuidle_driver_unref(void); extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx, bool disable); extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); @@ -186,8 +184,6 @@ static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv, static inline int cpuidle_register_driver(struct cpuidle_driver *drv) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } -static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; } -static inline void cpuidle_driver_unref(void) {} static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx, bool disable) { } static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
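The new CPUIDLE_FLAG_OFF lets a driver register a state that starts out disabled; it can still be turned on later through the state's sysfs "disable" attribute. An illustrative state-table entry, assuming a hypothetical enter callback and made-up latency numbers:

#include <linux/cpuidle.h>

static int example_enter_deep(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv, int index)
{
        /* hypothetical: program the hardware idle state here */
        return index;
}

static struct cpuidle_state example_deep_state = {
        .name                   = "DEEP",
        .desc                   = "hypothetical deep state, off by default",
        .flags                  = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_OFF,
        .exit_latency           = 500,          /* us, made up */
        .target_residency       = 1500,         /* us, made up */
        .enter                  = example_enter_deep,
};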
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index fb376b5b7281..c6f82d4bec9f 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -108,6 +108,20 @@ struct devfreq_dev_profile { }; /** + * struct devfreq_stats - Statistics of devfreq device behavior + * @total_trans: Number of devfreq transitions. + * @trans_table: Statistics of devfreq transitions. + * @time_in_state: Statistics of devfreq states. + * @last_update: The last time stats were updated. + */ +struct devfreq_stats { + unsigned int total_trans; + unsigned int *trans_table; + u64 *time_in_state; + u64 last_update; +}; + +/** * struct devfreq - Device devfreq structure * @node: list node - contains the devices with devfreq that have been * registered. @@ -122,6 +136,7 @@ struct devfreq_dev_profile { * devfreq.nb to the corresponding register notifier call chain. * @work: delayed work for load monitoring. * @previous_freq: previously configured frequency value. + * @last_status: devfreq user device info, performance statistics * @data: Private data of the governor. The devfreq framework does not * touch this. * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs) @@ -132,15 +147,12 @@ struct devfreq_dev_profile { * @suspend_freq: frequency of a device set during suspend phase. * @resume_freq: frequency of a device set in resume phase. * @suspend_count: suspend requests counter for a device. - * @total_trans: Number of devfreq transitions - * @trans_table: Statistics of devfreq transitions - * @time_in_state: Statistics of devfreq states - * @last_stat_updated: The last time stat updated + * @stats: Statistics of devfreq device behavior * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier * @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY * @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY * - * This structure stores the devfreq information for a give device. + * This structure stores the devfreq information for a given device. * * Note that when a governor accesses entries in struct devfreq in its * functions except for the context of callbacks defined in struct @@ -174,11 +186,8 @@ struct devfreq { unsigned long resume_freq; atomic_t suspend_count; - /* information for device frequency transition */ - unsigned int total_trans; - unsigned int *trans_table; - unsigned long *time_in_state; - unsigned long last_stat_updated; + /* information for device frequency transitions */ + struct devfreq_stats stats; struct srcu_notifier_head transition_notifier_list; diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h new file mode 100644 index 000000000000..61d5cc0ad601 --- /dev/null +++ b/include/linux/dma/k3-psil.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_PSIL_H_ +#define K3_PSIL_H_ + +#include <linux/types.h> + +#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000 + +struct device; + +/** + * enum udma_tp_level - Channel Throughput Levels + * @UDMA_TP_NORMAL: Normal channel + * @UDMA_TP_HIGH: High Throughput channel + * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel + */ +enum udma_tp_level { + UDMA_TP_NORMAL = 0, + UDMA_TP_HIGH, + UDMA_TP_ULTRAHIGH, + UDMA_TP_LAST, +}; + +/** + * enum psil_endpoint_type - PSI-L Endpoint type + * @PSIL_EP_NATIVE: Normal channel + * @PSIL_EP_PDMA_XY: XY mode PDMA + * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA + * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA + */ +enum psil_endpoint_type { + PSIL_EP_NATIVE = 0, + PSIL_EP_PDMA_XY, + PSIL_EP_PDMA_MCAN, + PSIL_EP_PDMA_AASRC, +}; + +/** + * struct psil_endpoint_config - PSI-L Endpoint configuration + * @ep_type: PSI-L endpoint type + * @pkt_mode: If set, the channel must be in Packet mode, otherwise in + * TR mode + * @notdpkt: TDCM must be suppressed on the TX channel + * @needs_epib: Endpoint needs EPIB + * @psd_size: If set, PSdata is used by the endpoint + * @channel_tpl: Desired throughput level for the channel + * @pdma_acc32: ACC32 must be enabled on the PDMA side + * @pdma_burst: BURST must be enabled on the PDMA side + */ +struct psil_endpoint_config { + enum psil_endpoint_type ep_type; + + unsigned pkt_mode:1; + unsigned notdpkt:1; + unsigned needs_epib:1; + u32 psd_size; + enum udma_tp_level channel_tpl; + + /* PDMA properties, valid for PSIL_EP_PDMA_* */ + unsigned pdma_acc32:1; + unsigned pdma_burst:1; +}; + +int psil_set_new_ep_config(struct device *dev, const char *name, + struct psil_endpoint_config *ep_config); + +#endif /* K3_PSIL_H_ */
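k3-psil.h gives boards a hook, psil_set_new_ep_config(), to override a PSI-L endpoint's default configuration before the corresponding DMA channel is set up. A sketch under assumed values; the endpoint name "tx0" and all field values below are made up for illustration:

#include <linux/dma/k3-psil.h>

static int example_override_ep(struct device *dev)
{
        struct psil_endpoint_config ep_config = {
                .ep_type        = PSIL_EP_PDMA_XY,
                .pkt_mode       = 1,
                .needs_epib     = 1,
                .psd_size       = 16,
                .channel_tpl    = UDMA_TP_NORMAL,
        };

        /* "tx0" is a hypothetical dma-names entry for this device */
        return psil_set_new_ep_config(dev, "tx0", &ep_config);
}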
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h new file mode 100644 index 000000000000..caadbab1632a --- /dev/null +++ b/include/linux/dma/k3-udma-glue.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_UDMA_GLUE_H_ +#define K3_UDMA_GLUE_H_ + +#include <linux/types.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/dma/ti-cppi5.h> + +struct k3_udma_glue_tx_channel_cfg { + struct k3_ring_cfg tx_cfg; + struct k3_ring_cfg txcq_cfg; + + bool tx_pause_on_err; + bool tx_filt_einfo; + bool tx_filt_pswords; + bool tx_supr_tdpkt; + u32 swdata_size; +}; + +struct k3_udma_glue_tx_channel; + +struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, + const char *name, struct k3_udma_glue_tx_channel_cfg *cfg); + +void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn); +int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + struct cppi5_host_desc_t *desc_tx, + dma_addr_t desc_dma); +int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + dma_addr_t *desc_dma); +int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn); +void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn); +void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + bool sync); +void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + void *data, void (*cleanup)(void *data, dma_addr_t desc_dma)); +u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn); +u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn); +int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn); + +enum { + K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0, + K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1, + K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2, + K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4, +}; + +/** + * k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg + * + * @rx_cfg: RX ring configuration + * @rxfdq_cfg: RX free Host PD ring configuration + * @ring_rxq_id: RX ring id (or -1 for any) + * @ring_rxfdq0_id: RX free Host PD ring (FDQ) id (or -1 for any) + * @rx_error_handling: Rx Error Handling Mode (0 - drop, 1 - re-try) + * @src_tag_lo_sel: Rx Source Tag Low Byte Selector in Host PD + */ +struct k3_udma_glue_rx_flow_cfg { + struct k3_ring_cfg rx_cfg; + struct k3_ring_cfg rxfdq_cfg; + int ring_rxq_id; + int ring_rxfdq0_id; + bool rx_error_handling; + int src_tag_lo_sel; +}; + +/** + * k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg + * + * @swdata_size: SW Data is present in Host PD of @swdata_size bytes + * @flow_id_base: first flow_id used by channel. + * if @flow_id_base = -1 - range of GP rflows will be + * allocated dynamically. + * @flow_id_num: number of RX flows used by channel + * @flow_id_use_rxchan_id: use RX channel id as flow id, + * used only if @flow_id_num = 1 + * @remote: indication that the RX channel is remote - some remote CPU + * core owns and controls the RX channel. The Linux host is + * only allowed to attach and configure RX flows within the + * RX channel. If set, no RX channel operations will be + * performed by the K3 NAVSS DMA glue interface.
+ * @def_flow_cfg default RX flow configuration, + * used only if @flow_id_num = 1 + */ +struct k3_udma_glue_rx_channel_cfg { + u32 swdata_size; + int flow_id_base; + int flow_id_num; + bool flow_id_use_rxchan_id; + bool remote; + + struct k3_udma_glue_rx_flow_cfg *def_flow_cfg; +}; + +struct k3_udma_glue_rx_channel; + +struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn( + struct device *dev, + const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg); + +void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn); +int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn); +void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn); +void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + bool sync); +int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, struct cppi5_host_desc_t *desc_tx, + dma_addr_t desc_dma); +int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, dma_addr_t *desc_dma); +int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg); +u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx); +u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn); +int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num); +void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num); +void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, void *data, + void (*cleanup)(void *data, dma_addr_t desc_dma), + bool skip_fdq); +int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx); +int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx); + +#endif /* K3_UDMA_GLUE_H_ */ diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h new file mode 100644 index 000000000000..579356ae447e --- /dev/null +++ b/include/linux/dma/ti-cppi5.h @@ -0,0 +1,1059 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CPPI5 descriptors interface + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef __TI_CPPI5_H__ +#define __TI_CPPI5_H__ + +#include <linux/bitops.h> +#include <linux/printk.h> +#include <linux/bug.h> + +/** + * struct cppi5_desc_hdr_t - Descriptor header, present in all types of + * descriptors + * @pkt_info0: Packet info word 0 (n/a in Buffer desc) + * @pkt_info0: Packet info word 1 (n/a in Buffer desc) + * @pkt_info0: Packet info word 2 (n/a in Buffer desc) + * @src_dst_tag: Packet info word 3 (n/a in Buffer desc) + */ +struct cppi5_desc_hdr_t { + u32 pkt_info0; + u32 pkt_info1; + u32 pkt_info2; + u32 src_dst_tag; +} __packed; + +/** + * struct cppi5_host_desc_t - Host-mode packet and buffer descriptor definition + * @hdr: Descriptor header + * @next_desc: word 4/5: Linking word + * @buf_ptr: word 6/7: Buffer pointer + * @buf_info1: word 8: Buffer valid data length + * @org_buf_len: word 9: Original buffer length + * @org_buf_ptr: word 10/11: Original buffer pointer + * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or + * Protocol Specific Data (optional, 0-128 bytes in + * multiples of 4), and/or + * Other Software Data (0-N bytes, optional) + */ +struct cppi5_host_desc_t { + struct cppi5_desc_hdr_t hdr; + u64 next_desc; + u64 buf_ptr; + u32 buf_info1; + u32 org_buf_len; + u64 org_buf_ptr; + u32 epib[0]; +} __packed; + +#define 
CPPI5_DESC_MIN_ALIGN (16U) + +#define CPPI5_INFO0_HDESC_EPIB_SIZE (16U) +#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE (128U) + +#define CPPI5_INFO0_HDESC_TYPE_SHIFT (30U) +#define CPPI5_INFO0_HDESC_TYPE_MASK GENMASK(31, 30) +#define CPPI5_INFO0_DESC_TYPE_VAL_HOST (1U) +#define CPPI5_INFO0_DESC_TYPE_VAL_MONO (2U) +#define CPPI5_INFO0_DESC_TYPE_VAL_TR (3U) +#define CPPI5_INFO0_HDESC_EPIB_PRESENT BIT(29) +/* + * Protocol Specific Words location: + * 0 - located in the descriptor, + * 1 = located in the SOP Buffer immediately prior to the data. + */ +#define CPPI5_INFO0_HDESC_PSINFO_LOCATION BIT(28) +#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT (22U) +#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK GENMASK(27, 22) +#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT (0) +#define CPPI5_INFO0_HDESC_PKTLEN_MASK GENMASK(21, 0) + +#define CPPI5_INFO1_DESC_PKTERROR_SHIFT (28U) +#define CPPI5_INFO1_DESC_PKTERROR_MASK GENMASK(31, 28) +#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT (24U) +#define CPPI5_INFO1_HDESC_PSFLGS_MASK GENMASK(27, 24) +#define CPPI5_INFO1_DESC_PKTID_SHIFT (14U) +#define CPPI5_INFO1_DESC_PKTID_MASK GENMASK(23, 14) +#define CPPI5_INFO1_DESC_FLOWID_SHIFT (0) +#define CPPI5_INFO1_DESC_FLOWID_MASK GENMASK(13, 0) +#define CPPI5_INFO1_DESC_FLOWID_DEFAULT CPPI5_INFO1_DESC_FLOWID_MASK + +#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT (27U) +#define CPPI5_INFO2_HDESC_PKTTYPE_MASK GENMASK(31, 27) +/* Return Policy: 0 - Entire packet 1 - Each buffer */ +#define CPPI5_INFO2_HDESC_RETPOLICY BIT(18) +/* + * Early Return: + * 0 = desc pointers should be returned after all reads have been completed + * 1 = desc pointers should be returned immediately upon fetching + * the descriptor and beginning to transfer data. + */ +#define CPPI5_INFO2_HDESC_EARLYRET BIT(17) +/* + * Return Push Policy: + * 0 = Descriptor must be returned to tail of queue + * 1 = Descriptor must be returned to head of queue + */ +#define CPPI5_INFO2_DESC_RETPUSHPOLICY BIT(16) +#define CPPI5_INFO2_DESC_RETP_MASK GENMASK(18, 16) + +#define CPPI5_INFO2_DESC_RETQ_SHIFT (0) +#define CPPI5_INFO2_DESC_RETQ_MASK GENMASK(15, 0) + +#define CPPI5_INFO3_DESC_SRCTAG_SHIFT (16U) +#define CPPI5_INFO3_DESC_SRCTAG_MASK GENMASK(31, 16) +#define CPPI5_INFO3_DESC_DSTTAG_SHIFT (0) +#define CPPI5_INFO3_DESC_DSTTAG_MASK GENMASK(15, 0) + +#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT (0) +#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK GENMASK(27, 0) + +#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT (0) +#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK GENMASK(27, 0) + +/** + * struct cppi5_desc_epib_t - Host Packet Descriptor Extended Packet Info Block + * @timestamp: word 0: application specific timestamp + * @sw_info0: word 1: Software Info 0 + * @sw_info1: word 1: Software Info 1 + * @sw_info2: word 1: Software Info 2 + */ +struct cppi5_desc_epib_t { + u32 timestamp; /* w0: application specific timestamp */ + u32 sw_info0; /* w1: Software Info 0 */ + u32 sw_info1; /* w2: Software Info 1 */ + u32 sw_info2; /* w3: Software Info 2 */ +}; + +/** + * struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor + * @hdr: Descriptor header + * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or + * Protocol Specific Data (optional, 0-128 bytes in + * multiples of 4), and/or + * Other Software Data (0-N bytes, optional) + */ +struct cppi5_monolithic_desc_t { + struct cppi5_desc_hdr_t hdr; + u32 epib[0]; +}; + +#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U) +#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK GENMASK(26, 18) + +/* + * Reload Count: + * 0 = Finish the packet and place the 
descriptor back on the return queue + * 1-0x1ff = Vector to the Reload Index and resume processing + * 0x1ff indicates perpetual loop, infinite reload until the channel is stopped + */ +#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT (20U) +#define CPPI5_INFO0_TRDESC_RLDCNT_MASK GENMASK(28, 20) +#define CPPI5_INFO0_TRDESC_RLDCNT_MAX (0x1ff) +#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE CPPI5_INFO0_TRDESC_RLDCNT_MAX +#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT (14U) +#define CPPI5_INFO0_TRDESC_RLDIDX_MASK GENMASK(19, 14) +#define CPPI5_INFO0_TRDESC_RLDIDX_MAX (0x3f) +#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT (0) +#define CPPI5_INFO0_TRDESC_LASTIDX_MASK GENMASK(13, 0) + +#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT (24U) +#define CPPI5_INFO1_TRDESC_RECSIZE_MASK GENMASK(26, 24) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B (0) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B (1U) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B (2U) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B (3U) + +static inline void cppi5_desc_dump(void *desc, u32 size) +{ + print_hex_dump(KERN_ERR, "dump udmap_desc: ", DUMP_PREFIX_NONE, + 32, 4, desc, size, false); +} + +#define CPPI5_TDCM_MARKER (0x1) +/** + * cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message + * @paddr: Physical address of the packet popped from the ring + * + * Returns true if the address indicates TDCM + */ +static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr) +{ + return (paddr & CPPI5_TDCM_MARKER) ? true : false; +} + +/** + * cppi5_desc_get_type - get descriptor type + * @desc_hdr: packet descriptor/TR header + * + * Returns descriptor type: + * CPPI5_INFO0_DESC_TYPE_VAL_HOST + * CPPI5_INFO0_DESC_TYPE_VAL_MONO + * CPPI5_INFO0_DESC_TYPE_VAL_TR + */ +static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr) +{ + return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >> + CPPI5_INFO0_HDESC_TYPE_SHIFT; +} + +/** + * cppi5_desc_get_errflags - get Error Flags from Desc + * @desc_hdr: packet/TR descriptor header + * + * Returns Error Flags from Packet/TR Descriptor + */ +static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr) +{ + return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >> + CPPI5_INFO1_DESC_PKTERROR_SHIFT; +} + +/** + * cppi5_desc_get_pktids - get Packet and Flow ids from Desc + * @desc_hdr: packet/TR descriptor header + * @pkt_id: Packet ID + * @flow_id: Flow ID + * + * Returns Packet and Flow ids from packet/TR descriptor + */ +static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr, + u32 *pkt_id, u32 *flow_id) +{ + *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >> + CPPI5_INFO1_DESC_PKTID_SHIFT; + *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >> + CPPI5_INFO1_DESC_FLOWID_SHIFT; +} + +/** + * cppi5_desc_set_pktids - set Packet and Flow ids in Desc + * @desc_hdr: packet/TR descriptor header + * @pkt_id: Packet ID + * @flow_id: Flow ID + */ +static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr, + u32 pkt_id, u32 flow_id) +{ + desc_hdr->pkt_info1 &= ~(CPPI5_INFO1_DESC_PKTID_MASK | + CPPI5_INFO1_DESC_FLOWID_MASK); + desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) & + CPPI5_INFO1_DESC_PKTID_MASK; + desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) & + CPPI5_INFO1_DESC_FLOWID_MASK; +} + +/** + * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc + * @desc_hdr: packet/TR descriptor header + * @flags: fags, supported values + * CPPI5_INFO2_HDESC_RETPOLICY + * CPPI5_INFO2_HDESC_EARLYRET 
+ * CPPI5_INFO2_DESC_RETPUSHPOLICY + * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved + */ +static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr, + u32 flags, u32 return_ring_id) +{ + desc_hdr->pkt_info2 &= ~(CPPI5_INFO2_DESC_RETP_MASK | + CPPI5_INFO2_DESC_RETQ_MASK); + desc_hdr->pkt_info2 |= flags & CPPI5_INFO2_DESC_RETP_MASK; + desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK; +} + +/** + * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc + * @desc_hdr: packet/TR descriptor header + * @src_tag_id: Source Tag + * @dst_tag_id: Dest Tag + * + * Returns Packet Src/Dst Tags from packet/TR descriptor + */ +static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr, + u32 *src_tag_id, u32 *dst_tag_id) +{ + if (src_tag_id) + *src_tag_id = (desc_hdr->src_dst_tag & + CPPI5_INFO3_DESC_SRCTAG_MASK) >> + CPPI5_INFO3_DESC_SRCTAG_SHIFT; + if (dst_tag_id) + *dst_tag_id = desc_hdr->src_dst_tag & + CPPI5_INFO3_DESC_DSTTAG_MASK; +} + +/** + * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc + * @desc_hdr: packet/TR descriptor header + * @src_tag_id: Source Tag + * @dst_tag_id: Dest Tag + * + * Returns Packet Src/Dst Tags from packet/TR descriptor + */ +static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr, + u32 src_tag_id, u32 dst_tag_id) +{ + desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) & + CPPI5_INFO3_DESC_SRCTAG_MASK; + desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK; +} + +/** + * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size + * @epib: is EPIB present + * @psdata_size: PSDATA size + * @sw_data_size: SWDATA size + * + * Returns required Host Packet Descriptor size + * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE + */ +static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size, + u32 sw_data_size) +{ + u32 desc_size; + + if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE) + return 0; + + desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size + + sw_data_size; + + if (epib) + desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE; + + return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN); +} + +/** + * cppi5_hdesc_init - Init Host Packet Descriptor size + * @desc: Host packet descriptor + * @flags: supported values + * CPPI5_INFO0_HDESC_EPIB_PRESENT + * CPPI5_INFO0_HDESC_PSINFO_LOCATION + * @psdata_size: PSDATA size + * + * Returns required Host Packet Descriptor size + * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE + */ +static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags, + u32 psdata_size) +{ + desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST << + CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags); + desc->hdr.pkt_info0 |= ((psdata_size >> 2) << + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; + desc->next_desc = 0; +} + +/** + * cppi5_hdesc_update_flags - Replace descriptor flags + * @desc: Host packet descriptor + * @flags: supported values + * CPPI5_INFO0_HDESC_EPIB_PRESENT + * CPPI5_INFO0_HDESC_PSINFO_LOCATION + */ +static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc, + u32 flags) +{ + desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT | + CPPI5_INFO0_HDESC_PSINFO_LOCATION); + desc->hdr.pkt_info0 |= flags; +} + +/** + * cppi5_hdesc_update_psdata_size - Replace PSdata size + * @desc: Host packet descriptor + * @psdata_size: PSDATA size + */ +static inline void +cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 
psdata_size) +{ + desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; + desc->hdr.pkt_info0 |= ((psdata_size >> 2) << + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; +} + +/** + * cppi5_hdesc_get_psdata_size - get PSdata size in bytes + * @desc: Host packet descriptor + */ +static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size = 0; + + if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)) + psdata_size = (desc->hdr.pkt_info0 & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + return (psdata_size << 2); +} + +/** + * cppi5_hdesc_get_pktlen - get Packet Length from HDesc + * @desc: Host packet descriptor + * + * Returns Packet Length from Host Packet Descriptor + */ +static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc) +{ + return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK); +} + +/** + * cppi5_hdesc_set_pktlen - set Packet Length in HDesc + * @desc: Host packet descriptor + */ +static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc, + u32 pkt_len) +{ + desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PKTLEN_MASK; + desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK); +} + +/** + * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc + * @desc: Host packet descriptor + * + * Returns Protocol Specific Flags from Host Packet Descriptor + */ +static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc) +{ + return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >> + CPPI5_INFO1_HDESC_PSFLGS_SHIFT; +} + +/** + * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc + * @desc: Host packet descriptor + */ +static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc, + u32 ps_flags) +{ + desc->hdr.pkt_info1 &= ~CPPI5_INFO1_HDESC_PSFLGS_MASK; + desc->hdr.pkt_info1 |= (ps_flags << + CPPI5_INFO1_HDESC_PSFLGS_SHIFT) & + CPPI5_INFO1_HDESC_PSFLGS_MASK; +} + +/** + * cppi5_hdesc_get_errflags - get Packet Type from HDesc + * @desc: Host packet descriptor + */ +static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc) +{ + return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >> + CPPI5_INFO2_HDESC_PKTTYPE_SHIFT; +} + +/** + * cppi5_hdesc_get_errflags - set Packet Type in HDesc + * @desc: Host packet descriptor + * @pkt_type: Packet Type + */ +static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc, + u32 pkt_type) +{ + desc->hdr.pkt_info2 &= ~CPPI5_INFO2_HDESC_PKTTYPE_MASK; + desc->hdr.pkt_info2 |= + (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) & + CPPI5_INFO2_HDESC_PKTTYPE_MASK; +} + +/** + * cppi5_hdesc_attach_buf - attach buffer to HDesc + * @desc: Host packet descriptor + * @buf: Buffer physical address + * @buf_data_len: Buffer length + * @obuf: Original Buffer physical address + * @obuf_len: Original Buffer length + * + * Attaches buffer to Host Packet Descriptor + */ +static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc, + dma_addr_t buf, u32 buf_data_len, + dma_addr_t obuf, u32 obuf_len) +{ + desc->buf_ptr = buf; + desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK; + desc->org_buf_ptr = obuf; + desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK; +} + +static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc, + dma_addr_t *obuf, u32 *obuf_len) +{ + *obuf = desc->org_buf_ptr; + *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK; +} + +static 
inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc) +{ + desc->buf_ptr = desc->org_buf_ptr; + desc->buf_info1 = desc->org_buf_len; +} + +/** + * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc + * @desc: Host Packet Descriptor + * @buf_desc: Host Buffer Descriptor physical address + * + * add and link Host Buffer Descriptor to HDesc + */ +static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc, + dma_addr_t hbuf_desc) +{ + desc->next_desc = hbuf_desc; +} + +static inline dma_addr_t +cppi5_hdesc_get_next_hbdesc(struct cppi5_host_desc_t *desc) +{ + return (dma_addr_t)desc->next_desc; +} + +static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc) +{ + desc->hdr = (struct cppi5_desc_hdr_t) { 0 }; + desc->next_desc = 0; +} + +/** + * cppi5_hdesc_epib_present - check if EPIB present + * @desc_hdr: packet descriptor/TR header + * + * Returns true if EPIB present in the packet + */ +static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr) +{ + return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT); +} + +/** + * cppi5_hdesc_get_psdata - Get pointer on PSDATA + * @desc: Host packet descriptor + * + * Returns pointer on PSDATA in HDesc. + * NULL - if ps_data placed at the start of data buffer. + */ +static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size; + void *psdata; + + if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION) + return NULL; + + psdata_size = (desc->hdr.pkt_info0 & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + if (!psdata_size) + return NULL; + + psdata = &desc->epib; + + if (cppi5_hdesc_epib_present(&desc->hdr)) + psdata += CPPI5_INFO0_HDESC_EPIB_SIZE; + + return psdata; +} + +/** + * cppi5_hdesc_get_swdata - Get pointer on swdata + * @desc: Host packet descriptor + * + * Returns pointer on SWDATA in HDesc. + * NOTE. It's caller responsibility to be sure hdesc actually has swdata. 
+ */ +static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size = 0; + void *swdata; + + if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)) + psdata_size = (desc->hdr.pkt_info0 & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + swdata = &desc->epib; + + if (cppi5_hdesc_epib_present(&desc->hdr)) + swdata += CPPI5_INFO0_HDESC_EPIB_SIZE; + + swdata += (psdata_size << 2); + + return swdata; +} + +/* ================================== TR ================================== */ + +#define CPPI5_TR_TYPE_SHIFT (0U) +#define CPPI5_TR_TYPE_MASK GENMASK(3, 0) +#define CPPI5_TR_STATIC BIT(4) +#define CPPI5_TR_WAIT BIT(5) +#define CPPI5_TR_EVENT_SIZE_SHIFT (6U) +#define CPPI5_TR_EVENT_SIZE_MASK GENMASK(7, 6) +#define CPPI5_TR_TRIGGER0_SHIFT (8U) +#define CPPI5_TR_TRIGGER0_MASK GENMASK(9, 8) +#define CPPI5_TR_TRIGGER0_TYPE_SHIFT (10U) +#define CPPI5_TR_TRIGGER0_TYPE_MASK GENMASK(11, 10) +#define CPPI5_TR_TRIGGER1_SHIFT (12U) +#define CPPI5_TR_TRIGGER1_MASK GENMASK(13, 12) +#define CPPI5_TR_TRIGGER1_TYPE_SHIFT (14U) +#define CPPI5_TR_TRIGGER1_TYPE_MASK GENMASK(15, 14) +#define CPPI5_TR_CMD_ID_SHIFT (16U) +#define CPPI5_TR_CMD_ID_MASK GENMASK(23, 16) +#define CPPI5_TR_CSF_FLAGS_SHIFT (24U) +#define CPPI5_TR_CSF_FLAGS_MASK GENMASK(31, 24) +#define CPPI5_TR_CSF_SA_INDIRECT BIT(0) +#define CPPI5_TR_CSF_DA_INDIRECT BIT(1) +#define CPPI5_TR_CSF_SUPR_EVT BIT(2) +#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U) +#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4) +#define CPPI5_TR_CSF_EOP BIT(7) + +/** + * enum cppi5_tr_types - TR types + * @CPPI5_TR_TYPE0: One dimensional data move + * @CPPI5_TR_TYPE1: Two dimensional data move + * @CPPI5_TR_TYPE2: Three dimensional data move + * @CPPI5_TR_TYPE3: Four dimensional data move + * @CPPI5_TR_TYPE4: Four dimensional data move with data formatting + * @CPPI5_TR_TYPE5: Four dimensional Cache Warm + * @CPPI5_TR_TYPE8: Four Dimensional Block Move + * @CPPI5_TR_TYPE9: Four Dimensional Block Move with Repacking + * @CPPI5_TR_TYPE10: Two Dimensional Block Move + * @CPPI5_TR_TYPE11: Two Dimensional Block Move with Repacking + * @CPPI5_TR_TYPE15: Four Dimensional Block Move with Repacking and + * Indirection + */ +enum cppi5_tr_types { + CPPI5_TR_TYPE0 = 0, + CPPI5_TR_TYPE1, + CPPI5_TR_TYPE2, + CPPI5_TR_TYPE3, + CPPI5_TR_TYPE4, + CPPI5_TR_TYPE5, + /* type6-7: Reserved */ + CPPI5_TR_TYPE8 = 8, + CPPI5_TR_TYPE9, + CPPI5_TR_TYPE10, + CPPI5_TR_TYPE11, + /* type12-14: Reserved */ + CPPI5_TR_TYPE15 = 15, + CPPI5_TR_TYPE_MAX +}; + +/** + * enum cppi5_tr_event_size - TR Flags EVENT_SIZE field specifies when an event + * is generated for each TR. 
+ * @CPPI5_TR_EVENT_SIZE_COMPLETION: When TR is complete and all status for + * the TR has been received + * @CPPI5_TR_EVENT_SIZE_ICNT1_DEC: Type 0: when the last data transaction + * is sent for the TR + * Type 1-11: when ICNT1 is decremented + * @CPPI5_TR_EVENT_SIZE_ICNT2_DEC: Type 0-1,10-11: when the last data + * transaction is sent for the TR + * All other types: when ICNT2 is + * decremented + * @CPPI5_TR_EVENT_SIZE_ICNT3_DEC: Type 0-2,10-11: when the last data + * transaction is sent for the TR + * All other types: when ICNT3 is + * decremented + */ +enum cppi5_tr_event_size { + CPPI5_TR_EVENT_SIZE_COMPLETION, + CPPI5_TR_EVENT_SIZE_ICNT1_DEC, + CPPI5_TR_EVENT_SIZE_ICNT2_DEC, + CPPI5_TR_EVENT_SIZE_ICNT3_DEC, + CPPI5_TR_EVENT_SIZE_MAX +}; + +/** + * enum cppi5_tr_trigger - TR Flags TRIGGERx field specifies the type of trigger + * used to enable the TR to transfer data as specified + * by TRIGGERx_TYPE field. + * @CPPI5_TR_TRIGGER_NONE: No trigger + * @CPPI5_TR_TRIGGER_GLOBAL0: Global trigger 0 + * @CPPI5_TR_TRIGGER_GLOBAL1: Global trigger 1 + * @CPPI5_TR_TRIGGER_LOCAL_EVENT: Local Event + */ +enum cppi5_tr_trigger { + CPPI5_TR_TRIGGER_NONE, + CPPI5_TR_TRIGGER_GLOBAL0, + CPPI5_TR_TRIGGER_GLOBAL1, + CPPI5_TR_TRIGGER_LOCAL_EVENT, + CPPI5_TR_TRIGGER_MAX +}; + +/** + * enum cppi5_tr_trigger_type - TR Flags TRIGGERx_TYPE field specifies the type + * of data transfer that will be enabled by + * receiving a trigger as specified by TRIGGERx. + * @CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC: The second inner most loop (ICNT1) will + * be decremented by 1 + * @CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC: The third inner most loop (ICNT2) will + * be decremented by 1 + * @CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC: The outer most loop (ICNT3) will be + * decremented by 1 + * @CPPI5_TR_TRIGGER_TYPE_ALL: The entire TR will be allowed to + * complete + */ +enum cppi5_tr_trigger_type { + CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC, + CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, + CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC, + CPPI5_TR_TRIGGER_TYPE_ALL, + CPPI5_TR_TRIGGER_TYPE_MAX +}; + +typedef u32 cppi5_tr_flags_t; + +/** + * struct cppi5_tr_type0_t - Type 0 (One dimensional data move) TR (16 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) + * @_reserved: Not used + * @addr: Starting address for the source data or destination data + */ +struct cppi5_tr_type0_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 _reserved; + u64 addr; +} __aligned(16) __packed; + +/** + * struct cppi5_tr_type1_t - Type 1 (Two dimensional data move) TR (32 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) + * @icnt1: Total loop iteration count for level 1 + * @addr: Starting address for the source data or destination data + * @dim1: Signed dimension for loop level 1 + */ +struct cppi5_tr_type1_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; +} __aligned(32) __packed; + +/** + * struct cppi5_tr_type2_t - Type 2 (Three dimensional data move) TR (32 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) + * @icnt1: Total loop iteration count for level 1 + * @addr: Starting address for the source data or destination data + * @dim1: Signed dimension for loop level 1 + * @icnt2: Total loop iteration count for level 2 + * @_reserved: Not used + * @dim2: Signed dimension for loop level 2 + */ +struct cppi5_tr_type2_t { + cppi5_tr_flags_t 
flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 _reserved; + s32 dim2; +} __aligned(32) __packed; + +/** + * struct cppi5_tr_type3_t - Type 3 (Four dimensional data move) TR (32 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) + * @icnt1: Total loop iteration count for level 1 + * @addr: Starting address for the source data or destination data + * @dim1: Signed dimension for loop level 1 + * @icnt2: Total loop iteration count for level 2 + * @icnt3: Total loop iteration count for level 3 (outermost) + * @dim2: Signed dimension for loop level 2 + * @dim3: Signed dimension for loop level 3 + */ +struct cppi5_tr_type3_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 icnt3; + s32 dim2; + s32 dim3; +} __aligned(32) __packed; + +/** + * struct cppi5_tr_type15_t - Type 15 (Four Dimensional Block Copy with + * Repacking and Indirection Support) TR (64 byte) + * @flags: TR flags (type, triggers, event, configuration) + * @icnt0: Total loop iteration count for level 0 (innermost) for + * source + * @icnt1: Total loop iteration count for level 1 for source + * @addr: Starting address for the source data + * @dim1: Signed dimension for loop level 1 for source + * @icnt2: Total loop iteration count for level 2 for source + * @icnt3: Total loop iteration count for level 3 (outermost) for + * source + * @dim2: Signed dimension for loop level 2 for source + * @dim3: Signed dimension for loop level 3 for source + * @_reserved: Not used + * @ddim1: Signed dimension for loop level 1 for destination + * @daddr: Starting address for the destination data + * @ddim2: Signed dimension for loop level 2 for destination + * @ddim3: Signed dimension for loop level 3 for destination + * @dicnt0: Total loop iteration count for level 0 (innermost) for + * destination + * @dicnt1: Total loop iteration count for level 1 for destination + * @dicnt2: Total loop iteration count for level 2 for destination + * @sicnt3: Total loop iteration count for level 3 (outermost) for + * destination + */ +struct cppi5_tr_type15_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 icnt3; + s32 dim2; + s32 dim3; + u32 _reserved; + s32 ddim1; + u64 daddr; + s32 ddim2; + s32 ddim3; + u16 dicnt0; + u16 dicnt1; + u16 dicnt2; + u16 dicnt3; +} __aligned(64) __packed; + +/** + * struct cppi5_tr_resp_t - TR response record + * @status: Status type and info + * @_reserved: Not used + * @cmd_id: Command ID for the TR for TR identification + * @flags: Configuration Specific Flags + */ +struct cppi5_tr_resp_t { + u8 status; + u8 _reserved; + u8 cmd_id; + u8 flags; +} __packed; + +#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT (0U) +#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK GENMASK(3, 0) +#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT (4U) +#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK GENMASK(7, 4) +#define CPPI5_TR_RESPONSE_CMDID_SHIFT (16U) +#define CPPI5_TR_RESPONSE_CMDID_MASK GENMASK(23, 16) +#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT (24U) +#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK GENMASK(31, 24) + +/** + * enum cppi5_tr_resp_status_type - TR Response Status Type field is used to + * determine what type of status is being + * returned. 
+ * @CPPI5_TR_RESPONSE_STATUS_NONE: No error, completion: completed + * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR: Transfer Error, completion: none + * or partially completed + * @CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR: Aborted Error, completion: none + * or partially completed + * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR: Submission Error, completion: + * none + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR: Unsupported Error, completion: + * none + * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION: Transfer Exception, completion: + * partially completed + * @CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH: Teardown Flush, completion: none + */ +enum cppi5_tr_resp_status_type { + CPPI5_TR_RESPONSE_STATUS_NONE, + CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR, + CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR, + CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION, + CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH, + CPPI5_TR_RESPONSE_STATUS_MAX +}; + +/** + * enum cppi5_tr_resp_status_submission - TR Response Status field values which + * corresponds Submission Error + * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0: ICNT0 was 0 + * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL: Channel FIFO was full when TR + * received + * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN: Channel is not owned by the + * submitter + */ +enum cppi5_tr_resp_status_submission { + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX +}; + +/** + * enum cppi5_tr_resp_status_unsupported - TR Response Status field values which + * corresponds Unsupported Error + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE: TR Type not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC: STATIC not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL: EOL not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC: CONFIGURATION SPECIFIC + * not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE: AMODE not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE: ELTYPE not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT: DFMT not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR: SECTR not supported + * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC: AMODE SPECIFIC field + * not supported + */ +enum cppi5_tr_resp_status_unsupported { + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX +}; + +/** + * cppi5_trdesc_calc_size - Calculate TR Descriptor size + * @tr_count: number of TR records + * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128] + * + * Returns required TR Descriptor size + */ +static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size) +{ + /* + * The Size of a TR descriptor is: + * 1 x tr_size : the first 16 bytes is used by the packet info block + + * tr_count x tr_size : Transfer Request Records + + * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records + */ + return tr_size * (tr_count + 1) + + sizeof(struct cppi5_tr_resp_t) * tr_count; +} + +/** + * 
cppi5_trdesc_init - Init TR Descriptor + * @desc: TR Descriptor + * @tr_count: number of TR records + * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128] + * @reload_idx: Absolute index to jump to on the 2nd and following passes + * through the TR packet. + * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff + * indicates infinite looping. + * + * Init TR Descriptor + */ +static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr, + u32 tr_count, u32 tr_size, u32 reload_idx, + u32 reload_count) +{ + desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR << + CPPI5_INFO0_HDESC_TYPE_SHIFT; + desc_hdr->pkt_info0 |= + (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) & + CPPI5_INFO0_TRDESC_RLDCNT_MASK; + desc_hdr->pkt_info0 |= + (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) & + CPPI5_INFO0_TRDESC_RLDIDX_MASK; + desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK; + + desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) << + CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) & + CPPI5_INFO1_TRDESC_RECSIZE_MASK; +} + +/** + * cppi5_tr_init - Init TR record + * @flags: Pointer to the TR's flags + * @type: TR type + * @static_tr: TR is static + * @wait: Wait for TR completion before allow the next TR to start + * @event_size: output event generation cfg + * @cmd_id: TR identifier (application specifics) + * + * Init TR record + */ +static inline void cppi5_tr_init(cppi5_tr_flags_t *flags, + enum cppi5_tr_types type, bool static_tr, + bool wait, enum cppi5_tr_event_size event_size, + u32 cmd_id) +{ + *flags = type; + *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) & + CPPI5_TR_EVENT_SIZE_MASK; + + *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) & + CPPI5_TR_CMD_ID_MASK; + + if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9)) + *flags |= CPPI5_TR_STATIC; + + if (wait) + *flags |= CPPI5_TR_WAIT; +} + +/** + * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type + * @flags: Pointer to the TR's flags + * @trigger0: trigger0 selection + * @trigger0_type: type of data transfer that will be enabled by trigger0 + * @trigger1: trigger1 selection + * @trigger1_type: type of data transfer that will be enabled by trigger1 + * + * Configure the triggers for the TR + */ +static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags, + enum cppi5_tr_trigger trigger0, + enum cppi5_tr_trigger_type trigger0_type, + enum cppi5_tr_trigger trigger1, + enum cppi5_tr_trigger_type trigger1_type) +{ + *flags &= ~(CPPI5_TR_TRIGGER0_MASK | CPPI5_TR_TRIGGER0_TYPE_MASK | + CPPI5_TR_TRIGGER1_MASK | CPPI5_TR_TRIGGER1_TYPE_MASK); + *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) & + CPPI5_TR_TRIGGER0_MASK; + *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) & + CPPI5_TR_TRIGGER0_TYPE_MASK; + + *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) & + CPPI5_TR_TRIGGER1_MASK; + *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) & + CPPI5_TR_TRIGGER1_TYPE_MASK; +} + +/** + * cppi5_tr_cflag_set - Update the Configuration specific flags + * @flags: Pointer to the TR's flags + * @csf: Configuration specific flags + * + * Set a bit in Configuration Specific Flags section of the TR flags. 
+ */ +static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf) +{ + *flags &= ~CPPI5_TR_CSF_FLAGS_MASK; + *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) & + CPPI5_TR_CSF_FLAGS_MASK; +} + +#endif /* __TI_CPPI5_H__ */
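Taken together, the host-descriptor helpers in ti-cppi5.h above cover the usual TX preparation sequence: initialize the header, set the packet length and ids, pick the return ring, then attach the data buffer. A minimal sketch built only from helpers defined in this header; the addresses, lengths and return ring id are placeholders:

#include <linux/dma/ti-cppi5.h>

static void example_prepare_hdesc(struct cppi5_host_desc_t *desc,
                                  dma_addr_t buf, u32 len, u32 ret_ring_id)
{
        /* host descriptor with EPIB and 16 bytes of PSdata */
        cppi5_hdesc_init(desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, 16);
        cppi5_hdesc_set_pktlen(desc, len);
        cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
        cppi5_desc_set_retpolicy(&desc->hdr, 0, ret_ring_id);
        cppi5_hdesc_attach_buf(desc, buf, len, buf, len);
}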
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index dad4a68fa009..64461fc64e1b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -219,6 +219,62 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; * @bytes_transferred: byte counter */ +/** + * enum dma_desc_metadata_mode - per descriptor metadata mode types supported + * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the + * client driver and it is attached (via the dmaengine_desc_attach_metadata() + * helper) to the descriptor. + * + * Client drivers interested in using this mode can follow: + * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) and + * construct the metadata in the client's buffer + * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the + * descriptor + * 3. submit the transfer + * - DMA_DEV_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the + * descriptor + * 3. submit the transfer + * 4. when the transfer is completed, the metadata should be available in the + * attached buffer + * + * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA + * driver. The client driver can ask for the pointer, maximum size and the + * currently used size of the metadata and can directly update or read it. + * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are + * provided as helper functions. + * + * Note: the metadata area for the descriptor is no longer valid after the + * transfer has been completed (valid up to the point when the completion + * callback returns if used). + * + * Client drivers interested in using this mode can follow: + * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's + * metadata area + * 3. update the metadata at the pointer + * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount + * of data the client has placed into the metadata buffer + * 5. submit the transfer + * - DMA_DEV_TO_MEM: + * 1. prepare the descriptor (dmaengine_prep_*) + * 2. submit the transfer + * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the + * pointer to the engine's metadata area + * 4. Read out the metadata from the pointer + * + * Note: the two modes are not compatible and clients must use one mode for a + * descriptor. + */ +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = BIT(0), + DESC_METADATA_ENGINE = BIT(1), +}; + struct dma_chan_percpu { /* stats */ unsigned long memcpy_count; @@ -238,10 +294,12 @@ struct dma_router { /** * struct dma_chan - devices supply DMA channels, clients use them * @device: ptr to the dma device who supplies this channel, always !%NULL + * @slave: ptr to the device using this channel * @cookie: last cookie value returned to client * @completed_cookie: last completed cookie for this channel * @chan_id: channel ID for sysfs * @dev: class device for sysfs + * @name: backlink name for sysfs * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client_count: how many clients are using this channel @@ -252,12 +310,14 @@ struct dma_router { */ struct dma_chan { struct dma_device *device; + struct device *slave; dma_cookie_t cookie; dma_cookie_t completed_cookie; /* sysfs */ int chan_id; struct dma_chan_dev *dev; + const char *name; struct list_head device_node; struct dma_chan_percpu __percpu *local; @@ -475,19 +535,36 @@ struct dmaengine_unmap_data { dma_addr_t addr[0]; }; +struct dma_async_tx_descriptor; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *desc, void *data, + size_t len); + + void *(*get_ptr)(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len); + int (*set_len)(struct dma_async_tx_descriptor *desc, + size_t payload_len); +}; + /** * struct dma_async_tx_descriptor - async transaction descriptor * ---dma generic offload fields--- * @cookie: tracking cookie for this transaction, set to -EBUSY if * this tx is sitting on a dependency list * @flags: flags to augment operation preparation, control completion, and - * communicate status + * communicate status * @phys: physical address of the descriptor * @chan: target channel for this operation * @tx_submit: accept the descriptor, assign ordered cookie and mark the * descriptor pending. To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete * @callback_param: general parameter to pass to the callback routine + * @desc_metadata_mode: core managed metadata mode to protect mixed use of + * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise + * DESC_METADATA_NONE + * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the + * DMA driver if metadata mode is supported with the descriptor * ---async_tx api specific fields--- * @next: at completion submit this descriptor * @parent: pointer to the next level up in the dependency chain @@ -504,6 +581,8 @@ struct dma_async_tx_descriptor { dma_async_tx_callback_result callback_result; void *callback_param; struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; @@ -611,11 +690,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr * @residue: the remaining number of bytes left to transmit * on the selected transfer for states DMA_IN_PROGRESS and * DMA_PAUSED if this is implemented in the driver, else 0 + * @in_flight_bytes: amount of data in bytes cached by the DMA.
*/ struct dma_tx_state { dma_cookie_t last; dma_cookie_t used; u32 residue; + u32 in_flight_bytes; }; /** @@ -666,6 +747,7 @@ struct dma_filter { * @global_node: list_head for global dma_device_list * @filter: information for device/slave to filter function/param mapping * @cap_mask: one or more dma_capability flags + * @desc_metadata_modes: supported metadata modes by the DMA device * @max_xor: maximum number of xor sources, 0 if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability * @copy_align: alignment shift for memcpy operations @@ -674,6 +756,7 @@ struct dma_filter { * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api + * @owner: owner module (automatically set based on the provided dev) * @src_addr_widths: bit mask of src addr widths the device supports * Width is specified in bytes, e.g. for a device supporting * a width of 4 the mask should have BIT(4) set. @@ -718,15 +801,21 @@ struct dma_filter { * will just return a simple status code * @device_issue_pending: push pending transactions to hardware * @descriptor_reuse: a submitted transfer can be resubmitted after completion + * @device_release: called sometime after dma_async_device_unregister() is + * called and there are no further references to this structure. This + * must be implemented to free resources; however, many existing drivers + * do not, and are therefore not safe to unbind while in use. + * */ struct dma_device { - + struct kref ref; unsigned int chancnt; unsigned int privatecnt; struct list_head channels; struct list_head global_node; struct dma_filter filter; dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; unsigned short max_xor; unsigned short max_pq; enum dmaengine_alignment copy_align; @@ -737,6 +826,7 @@ struct dma_device { int dev_id; struct device *dev; + struct module *owner; u32 src_addr_widths; u32 dst_addr_widths; @@ -800,6 +890,7 @@ struct dma_device { dma_cookie_t cookie, struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); + void (*device_release)(struct dma_device *dev); }; static inline int dmaengine_slave_config(struct dma_chan *chan, @@ -902,6 +993,41 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( len, flags); } +static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan, + enum dma_desc_metadata_mode mode) +{ + if (!chan) + return false; + + return !!(chan->device->desc_metadata_modes & mode); +} + +#ifdef CONFIG_DMA_ENGINE +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len); +void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len); +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len); +#else /* CONFIG_DMA_ENGINE */ +static inline int dmaengine_desc_attach_metadata( + struct dma_async_tx_descriptor *desc, void *data, size_t len) +{ + return -EINVAL; +} +static inline void *dmaengine_desc_get_metadata_ptr( + struct dma_async_tx_descriptor *desc, size_t *payload_len, + size_t *max_len) +{ + return NULL; +} +static inline int dmaengine_desc_set_metadata_len( + struct dma_async_tx_descriptor *desc, size_t payload_len) +{ + return -EINVAL; +} +#endif /* CONFIG_DMA_ENGINE */ +
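As an illustrative sketch (not part of the patch): the DESC_METADATA_CLIENT flow documented above reduces to prepare / attach / submit using the helpers declared here. The channel setup, the DMA-mapped buffer and the metadata layout are assumed to exist elsewhere in this hypothetical client driver.

#include <linux/dmaengine.h>

/* Hypothetical client helper: send @buf_len bytes at @dma_buf to the
 * device, with client-built metadata in @mdata/@mdata_len. */
static int my_xmit_with_metadata(struct dma_chan *chan, dma_addr_t dma_buf,
				 size_t buf_len, void *mdata, size_t mdata_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT))
		return -EOPNOTSUPP;

	/* 1. prepare the descriptor */
	desc = dmaengine_prep_slave_single(chan, dma_buf, buf_len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* 2. attach the client-constructed metadata buffer */
	ret = dmaengine_desc_attach_metadata(desc, mdata, mdata_len);
	if (ret)
		return ret;

	/* 3. submit the transfer */
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(chan);
	return 0;
}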
/** * dmaengine_terminate_all() - Terminate all active DMA transfers * @chan: The channel for which to terminate the transfers @@ -1402,16 +1528,16 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) int dma_async_device_register(struct dma_device *device); int dmaenginem_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan); +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); -struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); -struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); #define dma_request_channel(mask, x, y) \ __dma_request_channel(&(mask), x, y, NULL) -#define dma_request_slave_channel_compat(mask, x, y, dev, name) \ - __dma_request_slave_channel_compat(&(mask), x, y, dev, name) static inline struct dma_chan -*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, +*dma_request_slave_channel_compat(const dma_cap_mask_t mask, dma_filter_fn fn, void *fn_param, struct device *dev, const char *name) { @@ -1424,6 +1550,25 @@ static inline struct dma_chan if (!fn || !fn_param) return NULL; - return __dma_request_channel(mask, fn, fn_param, NULL); + return __dma_request_channel(&mask, fn, fn_param, NULL); +} + +static inline const char * +dmaengine_get_direction_text(enum dma_transfer_direction dir) +{ + switch (dir) { + case DMA_DEV_TO_MEM: + return "DEV_TO_MEM"; + case DMA_MEM_TO_DEV: + return "MEM_TO_DEV"; + case DMA_MEM_TO_MEM: + return "MEM_TO_MEM"; + case DMA_DEV_TO_DEV: + return "DEV_TO_DEV"; + default: + break; + } + + return "invalid"; } #endif /* DMAENGINE_H */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 8bb63027e4d6..ea4c133b4139 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -245,6 +245,18 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk) !(disk->flags & GENHD_FL_NO_PART_SCAN); } +static inline bool disk_has_partitions(struct gendisk *disk) +{ + bool ret = false; + + rcu_read_lock(); + if (rcu_dereference(disk->part_tbl)->len > 1) + ret = true; + rcu_read_unlock(); + + return ret; +} + static inline dev_t disk_devt(struct gendisk *disk) { return MKDEV(disk->major, disk->first_minor); diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 5215fdba6b9a..bf2d017dd7b7 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); +void gpiod_toggle_active_low(struct gpio_desc *desc); int gpiod_is_active_low(const struct gpio_desc *desc); int gpiod_cansleep(const struct gpio_desc *desc); @@ -483,6 +484,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) return -ENOSYS; } +static inline void gpiod_toggle_active_low(struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); +} + static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 31d4920994b9..1e897e4168ac 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -432,7 +432,8 @@ struct hstate { unsigned int surplus_huge_pages_node[MAX_NUMNODES]; #ifdef CONFIG_CGROUP_HUGETLB /* cgroup control files */ - struct cftype cgroup_files[5]; + struct cftype cgroup_files_dfl[5]; +
struct cftype cgroup_files_legacy[5]; #endif char name[HSTATE_NAME_LEN]; }; diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 72579168189d..5e609f25878c 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -27,6 +27,7 @@ enum hwmon_sensor_types { hwmon_humidity, hwmon_fan, hwmon_pwm, + hwmon_intrusion, hwmon_max, }; @@ -59,7 +60,8 @@ enum hwmon_chip_attributes { #define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples) enum hwmon_temp_attributes { - hwmon_temp_input = 0, + hwmon_temp_enable, + hwmon_temp_input, hwmon_temp_type, hwmon_temp_lcrit, hwmon_temp_lcrit_hyst, @@ -85,6 +87,7 @@ enum hwmon_temp_attributes { hwmon_temp_reset_history, }; +#define HWMON_T_ENABLE BIT(hwmon_temp_enable) #define HWMON_T_INPUT BIT(hwmon_temp_input) #define HWMON_T_TYPE BIT(hwmon_temp_type) #define HWMON_T_LCRIT BIT(hwmon_temp_lcrit) @@ -111,6 +114,7 @@ enum hwmon_temp_attributes { #define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history) enum hwmon_in_attributes { + hwmon_in_enable, hwmon_in_input, hwmon_in_min, hwmon_in_max, @@ -126,9 +130,9 @@ enum hwmon_in_attributes { hwmon_in_max_alarm, hwmon_in_lcrit_alarm, hwmon_in_crit_alarm, - hwmon_in_enable, }; +#define HWMON_I_ENABLE BIT(hwmon_in_enable) #define HWMON_I_INPUT BIT(hwmon_in_input) #define HWMON_I_MIN BIT(hwmon_in_min) #define HWMON_I_MAX BIT(hwmon_in_max) @@ -144,9 +148,9 @@ enum hwmon_in_attributes { #define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm) #define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm) #define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm) -#define HWMON_I_ENABLE BIT(hwmon_in_enable) enum hwmon_curr_attributes { + hwmon_curr_enable, hwmon_curr_input, hwmon_curr_min, hwmon_curr_max, @@ -164,6 +168,7 @@ enum hwmon_curr_attributes { hwmon_curr_crit_alarm, }; +#define HWMON_C_ENABLE BIT(hwmon_curr_enable) #define HWMON_C_INPUT BIT(hwmon_curr_input) #define HWMON_C_MIN BIT(hwmon_curr_min) #define HWMON_C_MAX BIT(hwmon_curr_max) @@ -181,6 +186,7 @@ enum hwmon_curr_attributes { #define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm) enum hwmon_power_attributes { + hwmon_power_enable, hwmon_power_average, hwmon_power_average_interval, hwmon_power_average_interval_max, @@ -211,6 +217,7 @@ enum hwmon_power_attributes { hwmon_power_crit_alarm, }; +#define HWMON_P_ENABLE BIT(hwmon_power_enable) #define HWMON_P_AVERAGE BIT(hwmon_power_average) #define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval) #define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max) @@ -241,14 +248,17 @@ enum hwmon_power_attributes { #define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm) enum hwmon_energy_attributes { + hwmon_energy_enable, hwmon_energy_input, hwmon_energy_label, }; +#define HWMON_E_ENABLE BIT(hwmon_energy_enable) #define HWMON_E_INPUT BIT(hwmon_energy_input) #define HWMON_E_LABEL BIT(hwmon_energy_label) enum hwmon_humidity_attributes { + hwmon_humidity_enable, hwmon_humidity_input, hwmon_humidity_label, hwmon_humidity_min, @@ -259,6 +269,7 @@ enum hwmon_humidity_attributes { hwmon_humidity_fault, }; +#define HWMON_H_ENABLE BIT(hwmon_humidity_enable) #define HWMON_H_INPUT BIT(hwmon_humidity_input) #define HWMON_H_LABEL BIT(hwmon_humidity_label) #define HWMON_H_MIN BIT(hwmon_humidity_min) @@ -269,6 +280,7 @@ enum hwmon_humidity_attributes { #define HWMON_H_FAULT BIT(hwmon_humidity_fault) enum hwmon_fan_attributes { + hwmon_fan_enable, hwmon_fan_input, hwmon_fan_label, hwmon_fan_min, @@ -282,6 +294,7 @@ enum hwmon_fan_attributes { hwmon_fan_fault, }; +#define HWMON_F_ENABLE BIT(hwmon_fan_enable) 
#define HWMON_F_INPUT BIT(hwmon_fan_input) #define HWMON_F_LABEL BIT(hwmon_fan_label) #define HWMON_F_MIN BIT(hwmon_fan_min) @@ -306,6 +319,13 @@ enum hwmon_pwm_attributes { #define HWMON_PWM_MODE BIT(hwmon_pwm_mode) #define HWMON_PWM_FREQ BIT(hwmon_pwm_freq) +enum hwmon_intrusion_attributes { + hwmon_intrusion_alarm, + hwmon_intrusion_beep, +}; +#define HWMON_INTRUSION_ALARM BIT(hwmon_intrusion_alarm) +#define HWMON_INTRUSION_BEEP BIT(hwmon_intrusion_beep) + /** * struct hwmon_ops - hwmon device operations * @is_visible: Callback to return attribute visibility. Mandatory. diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 76cf11e905e1..8a9792a6427a 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -24,6 +24,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb_mac_header(skb); } +/* Prefer this version in TX path, instead of + * skb_reset_mac_header() + eth_hdr() + */ +static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb->data; +} + static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) { return (struct ethhdr *)skb_inner_mac_header(skb); diff --git a/include/linux/io.h b/include/linux/io.h index a59834bc0a11..b1c44bb4b2d7 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -66,8 +66,6 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size); void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, resource_size_t size); -void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, - resource_size_t size); void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, resource_size_t size); void devm_iounmap(struct device *dev, void __iomem *addr); @@ -87,7 +85,7 @@ void *__devm_memremap_pages(struct device *dev, struct resource *res); * Posting") mandate non-posted configuration transactions. There is * no ioremap API in the kernel that can guarantee non-posted write * semantics across arches so provide a default implementation for - * mapping PCI config space that defaults to ioremap_nocache(); arches + * mapping PCI config space that defaults to ioremap(); arches * should override it if they have memory mapping implementations that * guarantee non-posted writes semantics to make the memory mapping * compliant with the PCI specification. @@ -97,7 +95,7 @@ void *__devm_memremap_pages(struct device *dev, struct resource *res); static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset, size_t size) { - return ioremap_nocache(offset, size); + return ioremap(offset, size); } #endif #endif diff --git a/include/linux/list.h b/include/linux/list.h index 85c92555e31f..3c391bbd03c3 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -539,6 +539,16 @@ static inline void list_splice_tail_init(struct list_head *list, for (pos = (head)->next; pos != (head); pos = pos->next) /** + * list_for_each_continue - continue iteration over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * Continue to iterate over a list, continuing after the current position. + */ +#define list_for_each_continue(pos, head) \ + for (pos = pos->next; pos != (head); pos = pos->next) + +/** * list_for_each_prev - iterate over a list backwards * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. 
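As an illustrative sketch (not part of the patch): list_for_each_continue() resumes iteration after a cursor that already points into the list, which is what distinguishes it from list_for_each(). The struct and helper below are hypothetical.

#include <linux/list.h>

struct item {
	int val;
	struct list_head node;
};

/* Count how many entries follow @pos on the list headed by @head;
 * @pos itself is not revisited and must currently be on the list. */
static int count_after(struct list_head *pos, struct list_head *head)
{
	int n = 0;

	list_for_each_continue(pos, head)
		n++;

	return n;
}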
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 915330abf6e5..99d629fd9944 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -74,6 +74,7 @@ struct common_audit_data { #define LSM_AUDIT_DATA_FILE 12 #define LSM_AUDIT_DATA_IBPKEY 13 #define LSM_AUDIT_DATA_IBENDPORT 14 +#define LSM_AUDIT_DATA_LOCKDOWN 15 union { struct path path; struct dentry *dentry; @@ -93,6 +94,7 @@ struct common_audit_data { struct file *file; struct lsm_ibpkey_audit *ibpkey; struct lsm_ibendport_audit *ibendport; + int reason; } u; /* this union contains LSM specific data */ union { diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h index f84b9163c0ee..7dfb63b81373 100644 --- a/include/linux/mfd/mt6397/rtc.h +++ b/include/linux/mfd/mt6397/rtc.h @@ -46,6 +46,14 @@ #define RTC_AL_SEC 0x0018 +#define RTC_AL_SEC_MASK 0x003f +#define RTC_AL_MIN_MASK 0x003f +#define RTC_AL_HOU_MASK 0x001f +#define RTC_AL_DOM_MASK 0x001f +#define RTC_AL_DOW_MASK 0x0007 +#define RTC_AL_MTH_MASK 0x000f +#define RTC_AL_YEA_MASK 0x007f + #define RTC_PDN2 0x002e #define RTC_PDN2_PWRON_ALARM BIT(4) diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 739b7bf37eaa..8ba042430d8e 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -79,9 +79,6 @@ /* Some controllers have a CBSY bit */ #define TMIO_MMC_HAVE_CBSY BIT(11) -/* Some controllers that support HS400 use 4 taps while others use 8. */ -#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) - int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); diff --git a/include/linux/mm.h b/include/linux/mm.h index 80a9162b406c..cfaa8feecfe8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2658,14 +2658,26 @@ static inline bool want_init_on_free(void) !page_poisoning_enabled(); } -#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT -DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled); +#ifdef CONFIG_DEBUG_PAGEALLOC +extern void init_debug_pagealloc(void); #else -DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); +static inline void init_debug_pagealloc(void) {} #endif +extern bool _debug_pagealloc_enabled_early; +DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); static inline bool debug_pagealloc_enabled(void) { + return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && + _debug_pagealloc_enabled_early; +} + +/* + * For use in fast paths after init_debug_pagealloc() has run, or when a + * false negative result is not harmful when called too early. 
+ */ +static inline bool debug_pagealloc_enabled_static(void) +{ if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) return false; diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 0de3d7c016cd..4ae2f2908f99 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -17,10 +17,9 @@ int mmc_gpio_get_ro(struct mmc_host *host); int mmc_gpio_get_cd(struct mmc_host *host); int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, - unsigned int debounce, bool *gpio_invert); + unsigned int debounce); int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, - unsigned int debounce, bool *gpio_invert); + unsigned int idx, unsigned int debounce); void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 89d8ff06c9ce..5334ad8fc7bd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -215,9 +215,8 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " " " " */ - NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */ - NR_SLAB_UNRECLAIMABLE, /* and this one without looking at - * memcg_flush_percpu_vmstats() first. */ + NR_SLAB_RECLAIMABLE, + NR_SLAB_UNRECLAIMABLE, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index ecc88a41792a..c04f690871ca 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -40,7 +40,7 @@ typedef enum { FL_READING, FL_CACHEDPRG, /* These 4 come from onenand_state_t, which has been unified here */ - FL_RESETING, + FL_RESETTING, FL_OTPING, FL_PREPARING_ERASE, FL_VERIFYING_ERASE, diff --git a/include/linux/namei.h b/include/linux/namei.h index 7fe7b87a3ded..07bfb0874033 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -34,7 +34,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; /* internal use only */ #define LOOKUP_PARENT 0x0010 -#define LOOKUP_NO_REVAL 0x0080 #define LOOKUP_JUMPED 0x1000 #define LOOKUP_ROOT 0x2000 #define LOOKUP_ROOT_GRABBED 0x0008 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ae5e260911e2..cac56fb59af8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3698,6 +3698,8 @@ int dev_set_alias(struct net_device *, const char *, size_t); int dev_get_alias(const struct net_device *, char *, size_t); int dev_change_net_namespace(struct net_device *, struct net *, const char *); int __dev_set_mtu(struct net_device *, int); +int dev_validate_mtu(struct net_device *dev, int mtu, + struct netlink_ext_ack *extack); int dev_set_mtu_ext(struct net_device *dev, int mtu, struct netlink_ext_ack *extack); int dev_set_mtu(struct net_device *, int); diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 4d8b1eaf7708..908d38dbcb91 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -426,13 +426,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr) sizeof(*addr)); } -/* Calculate the bytes required to store the inclusive range of a-b */ -static inline int -bitmap_bytes(u32 a, u32 b) -{ - return 4 * ((((b - a + 8) / 8) + 3) / 4); -} - 
/* How often should the gc be run by default */ #define IPSET_GC_TIME (3 * 60) diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index cf09ab37b45b..851425c3178f 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -31,7 +31,7 @@ struct nfnetlink_subsystem { const struct nfnl_callback *cb; /* callback for individual types */ struct module *owner; int (*commit)(struct net *net, struct sk_buff *skb); - int (*abort)(struct net *net, struct sk_buff *skb); + int (*abort)(struct net *net, struct sk_buff *skb, bool autoload); void (*cleanup)(struct net *net); bool (*valid_genid)(struct net *net, u32 genid); }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2302d133af6f..352c0d708720 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -549,6 +549,7 @@ #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 +#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 7f8c7d9583d3..019fecd75d0c 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -40,6 +40,7 @@ extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev); extern void devm_pinctrl_put(struct pinctrl *p); +extern int pinctrl_select_default_state(struct device *dev); #ifdef CONFIG_PM extern int pinctrl_pm_select_default_state(struct device *dev); @@ -122,6 +123,11 @@ static inline void devm_pinctrl_put(struct pinctrl *p) { } +static inline int pinctrl_select_default_state(struct device *dev) +{ + return 0; +} + static inline int pinctrl_pm_select_default_state(struct device *dev) { return 0; diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index 6d54fe3bcac9..b8da8aef2446 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -101,6 +101,7 @@ struct mlxreg_core_data { * @aggr_mask: group aggregation mask; * @reg: group interrupt status register; * @mask: group interrupt mask; + * @capability: group capability register; * @cache: last status value for elements from the same group; * @count: number of available elements in the group; * @ind: element's index inside the group; @@ -112,6 +113,7 @@ struct mlxreg_core_item { u32 aggr_mask; u32 reg; u32 mask; + u32 capability; u32 cache; u8 count; u8 ind; diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 60249e22e844..d39fc658c320 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -58,6 +58,7 @@ #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ #define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 #define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018 +#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075 /* Misc */ #define ASUS_WMI_DEVID_CAMERA 0x00060013 diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h index 08468fca5ea2..1ea5bae708a1 100644 --- a/include/linux/pmbus.h +++ b/include/linux/pmbus.h @@ -8,6 +8,8 @@ #ifndef _PMBUS_H_ #define _PMBUS_H_ +#include <linux/bits.h> + /* flags */ /* @@ -23,7 +25,14 @@ * communication errors for no explicable reason.
For such chips, checking * the status register must be disabled. */ -#define PMBUS_SKIP_STATUS_CHECK (1 << 0) +#define PMBUS_SKIP_STATUS_CHECK BIT(0) + +/* + * PMBUS_WRITE_PROTECTED + * Set if the chip is write protected and write protection is not determined + * by the standard WRITE_PROTECT command. + */ +#define PMBUS_WRITE_PROTECTED BIT(1) struct pmbus_platform_data { u32 flags; /* Device specific flags */ diff --git a/include/linux/property.h b/include/linux/property.h index 48335288c2a9..d86de017c689 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -22,6 +22,7 @@ enum dev_prop_type { DEV_PROP_U32, DEV_PROP_U64, DEV_PROP_STRING, + DEV_PROP_REF, }; enum dev_dma_attr { @@ -223,28 +224,42 @@ static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode, return fwnode_property_read_u64_array(fwnode, propname, NULL, 0); } +struct software_node; + +/** + * struct software_node_ref_args - Reference property with additional arguments + * @node: Reference to a software node + * @nargs: Number of elements in @args array + * @args: Integer arguments + */ +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[NR_FWNODE_REFERENCE_ARGS]; +}; + /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. * @length: Length of data making up the value. - * @is_array: True when the property is an array. + * @is_inline: True when the property value is stored inline. * @type: Type of the data in unions. - * @pointer: Pointer to the property (an array of items of the given type). - * @value: Value of the property (when it is a single item of the given type). + * @pointer: Pointer to the property when it is not stored inline. + * @value: Value of the property when it is stored inline. 
*/ struct property_entry { const char *name; size_t length; - bool is_array; + bool is_inline; enum dev_prop_type type; union { const void *pointer; union { - u8 u8_data; - u16 u16_data; - u32 u32_data; - u64 u64_data; - const char *str; + u8 u8_data[sizeof(u64) / sizeof(u8)]; + u16 u16_data[sizeof(u64) / sizeof(u16)]; + u32 u32_data[sizeof(u64) / sizeof(u32)]; + u64 u64_data[sizeof(u64) / sizeof(u64)]; + const char *str[sizeof(u64) / sizeof(char *)]; } value; }; }; @@ -256,17 +271,22 @@ struct property_entry { */ #define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \ - sizeof(((struct property_entry *)NULL)->value._elem_) + sizeof(((struct property_entry *)NULL)->value._elem_[0]) -#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\ +#define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_, \ + _val_, _len_) \ (struct property_entry) { \ .name = _name_, \ - .length = (_len_) * __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ - .is_array = true, \ + .length = (_len_) * (_elsize_), \ .type = DEV_PROP_##_Type_, \ { .pointer = _val_ }, \ } +#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\ + __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \ + __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ + _Type_, _val_, _len_) + #define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_) #define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \ @@ -277,6 +297,10 @@ struct property_entry { __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_) #define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \ __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_) +#define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \ + __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \ + sizeof(struct software_node_ref_args), \ + REF, _val_, _len_) #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) @@ -288,13 +312,16 @@ struct property_entry { PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) +#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) #define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \ (struct property_entry) { \ .name = _name_, \ .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ + .is_inline = true, \ .type = DEV_PROP_##_Type_, \ - { .value = { ._elem_ = _val_ } }, \ + { .value = { ._elem_[0] = _val_ } }, \ } #define PROPERTY_ENTRY_U8(_name_, _val_) \ @@ -311,6 +338,19 @@ struct property_entry { #define PROPERTY_ENTRY_BOOL(_name_) \ (struct property_entry) { \ .name = _name_, \ + .is_inline = true, \ +} + +#define PROPERTY_ENTRY_REF(_name_, _ref_, ...) 
\ +(struct property_entry) { \ + .name = _name_, \ + .length = sizeof(struct software_node_ref_args), \ + .type = DEV_PROP_REF, \ + { .pointer = &(const struct software_node_ref_args) { \ + .node = _ref_, \ + .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \ + .args = { __VA_ARGS__ }, \ + } }, \ } struct property_entry * @@ -376,44 +416,16 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, /* -------------------------------------------------------------------------- */ /* Software fwnode support - when HW description is incomplete or missing */ -struct software_node; - -/** - * struct software_node_ref_args - Reference with additional arguments - * @node: Reference to a software node - * @nargs: Number of elements in @args array - * @args: Integer arguments - */ -struct software_node_ref_args { - const struct software_node *node; - unsigned int nargs; - u64 args[NR_FWNODE_REFERENCE_ARGS]; -}; - -/** - * struct software_node_reference - Named software node reference property - * @name: Name of the property - * @nrefs: Number of elements in @refs array - * @refs: Array of references with optional arguments - */ -struct software_node_reference { - const char *name; - unsigned int nrefs; - const struct software_node_ref_args *refs; -}; - /** * struct software_node - Software node description * @name: Name of the software node * @parent: Parent of the software node * @properties: Array of device properties */ struct software_node { const char *name; const struct software_node *parent; const struct property_entry *properties; -const struct software_node_reference *references; }; bool is_software_node(const struct fwnode_handle *fwnode);
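As an illustrative sketch (not part of the patch): with references now expressed as ordinary property entries, a software node is built from a property_entry array alone; PROPERTY_ENTRY_REF() stores a pointer to a compound-literal software_node_ref_args. All names and values below are invented, and the nodes would still have to be registered (e.g. with software_node_register()).

#include <linux/property.h>

static const struct software_node gpio_bank_node = {
	.name = "gpio-bank",
};

static const u8 sensor_cal[] = { 0x10, 0x20, 0x30 };

static const struct property_entry sensor_props[] = {
	PROPERTY_ENTRY_U32("poll-interval-ms", 250),	/* stored inline */
	PROPERTY_ENTRY_STRING("label", "board sensor"),
	PROPERTY_ENTRY_U8_ARRAY("calibration", sensor_cal),
	/* reference with two integer args, like a <&gpio_bank 7 0> phandle */
	PROPERTY_ENTRY_REF("bank", &gpio_bank_node, 7, 0),
	{ }
};

static const struct software_node sensor_node = {
	.name = "sensor",
	.properties = sensor_props,
};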
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 0832c9b66852..154e954b711d 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -27,8 +27,8 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #include <errno.h> #include <inttypes.h> -#include <limits.h> #include <stddef.h> +#include <string.h> #include <sys/mman.h> #include <sys/time.h> #include <sys/types.h> @@ -44,6 +44,9 @@ typedef uint64_t u64; #ifndef PAGE_SIZE # define PAGE_SIZE 4096 #endif +#ifndef PAGE_SHIFT +# define PAGE_SHIFT 12 +#endif extern const char raid6_empty_zero_page[PAGE_SIZE]; #define __init @@ -59,7 +62,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #define enable_kernel_altivec() #define disable_kernel_altivec() +#undef EXPORT_SYMBOL #define EXPORT_SYMBOL(sym) +#undef EXPORT_SYMBOL_GPL #define EXPORT_SYMBOL_GPL(sym) #define MODULE_LICENSE(licence) #define MODULE_DESCRIPTION(desc) diff --git a/include/linux/regmap.h b/include/linux/regmap.h index dfe493ac692d..f0a092a1a96d 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -145,6 +145,51 @@ struct reg_sequence { }) /** + * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs + * + * @map: Regmap to read from + * @addr: Address to poll + * @val: Unsigned integer variable to read the value into + * @cond: Break condition (usually involving @val) + * @delay_us: Time to udelay between reads in us (0 tight-loops). + * Should be less than ~10us since udelay is used + * (see Documentation/timers/timers-howto.rst). + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read + * error return value in case of a read error. In the first two cases, + * the last read value at @addr is stored in @val. + * + * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h. + * + * Note: In general regmap cannot be used in atomic context. If you want to use + * this macro then first setup your regmap for atomic use (flat or no cache + * and MMIO regmap). + */ +#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __delay_us = (delay_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + int __ret; \ + for (;;) { \ + __ret = regmap_read((map), (addr), &(val)); \ + if (__ret) \ + break; \ + if (cond) \ + break; \ + if ((__timeout_us) && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + __ret = regmap_read((map), (addr), &(val)); \ + break; \ + } \ + if (__delay_us) \ + udelay(__delay_us); \ + } \ + __ret ?: ((cond) ? 0 : -ETIMEDOUT); \ +}) + +/** * regmap_field_read_poll_timeout - Poll until a condition is met or timeout * * @field: Regmap field to read from
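As an illustrative sketch (not part of the patch): the atomic poll macro is used exactly like regmap_read_poll_timeout(), only without sleeping. The register and bit names are hypothetical, and the regmap is assumed to be set up for atomic use (MMIO, no cache).

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/regmap.h>

#define PLL_STATUS	0x10		/* hypothetical status register */
#define PLL_LOCKED	BIT(0)		/* hypothetical lock bit */

/* Busy-wait up to 100us, in 5us udelay steps, for the PLL to lock. */
static int pll_wait_locked_atomic(struct regmap *map)
{
	unsigned int val;

	return regmap_read_poll_timeout_atomic(map, PLL_STATUS, val,
					       val & PLL_LOCKED, 5, 100);
}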
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 337a46391527..6a92fd3105a3 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -287,6 +287,8 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, const char *const *supply_names, unsigned int num_supplies); +bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2); + #else /* @@ -593,6 +595,11 @@ regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, { } +static inline bool +regulator_is_equal(struct regulator *reg1, struct regulator *reg2) +{ + return false; +} #endif static inline int regulator_set_voltage_triplet(struct regulator *regulator, diff --git a/include/linux/sched.h b/include/linux/sched.h index 467d26046416..716ad1d8d95e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1929,11 +1929,11 @@ static inline void rseq_migrate(struct task_struct *t) /* * If parent process has a registered restartable sequences area, the - * child inherits. Only applies when forking a process, not a thread. + * child inherits. Unregister rseq for a clone with CLONE_VM set. */ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) { - if (clone_flags & CLONE_THREAD) { + if (clone_flags & CLONE_VM) { t->rseq = NULL; t->rseq_sig = 0; t->rseq_event_mask = 0; diff --git a/include/linux/security.h b/include/linux/security.h index 3e8d4bacd59d..64b19f050343 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -128,6 +128,8 @@ enum lockdown_reason { LOCKDOWN_CONFIDENTIALITY_MAX, }; +extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1]; + /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts); diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index ef7031f8a304..14d61bba0b79 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -358,17 +358,22 @@ static inline void sk_psock_update_proto(struct sock *sk, static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { - sk->sk_write_space = psock->saved_write_space; + sk->sk_prot->unhash = psock->saved_unhash; if (psock->sk_proto) { struct inet_connection_sock *icsk = inet_csk(sk); bool has_ulp = !!icsk->icsk_ulp_data; - if (has_ulp) - tcp_update_ulp(sk, psock->sk_proto); - else + if (has_ulp) { + tcp_update_ulp(sk, psock->sk_proto, + psock->saved_write_space); + } else { sk->sk_prot = psock->sk_proto; + sk->sk_write_space = psock->saved_write_space; + } psock->sk_proto = NULL; + } else { + sk->sk_write_space = psock->saved_write_space; } } diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h new file mode 100644 index 000000000000..26f73df0a524 --- /dev/null +++ b/include/linux/soc/ti/k3-ringacc.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * K3 Ring Accelerator (RA) subsystem interface + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef __SOC_TI_K3_RINGACC_API_H_ +#define __SOC_TI_K3_RINGACC_API_H_ + +#include <linux/types.h> + +struct device_node; + +/** + * enum k3_ring_mode - &struct k3_ring_cfg mode + * + * RA ring operational modes + * + * @K3_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access + * @K3_RINGACC_RING_MODE_MESSAGE: Messaging mode. Messaging mode requires + * that all accesses to the queue must go through this IP so that all + * accesses to the memory are controlled and ordered. This IP then + * controls the entire state of the queue, and SW has no direct control, + * such as through doorbells, and cannot access the storage memory directly. + * This is particularly useful when more than one SW or HW entity can be + * the producer and/or consumer at the same time + * @K3_RINGACC_RING_MODE_CREDENTIALS: Credentials mode is message mode plus + * stores credentials with each message, requiring the element size to be + * doubled to fit the credentials. Any exposed memory should be protected + * by a firewall from unwanted access + */ +enum k3_ring_mode { + K3_RINGACC_RING_MODE_RING = 0, + K3_RINGACC_RING_MODE_MESSAGE, + K3_RINGACC_RING_MODE_CREDENTIALS, + K3_RINGACC_RING_MODE_INVALID +}; + +/** + * enum k3_ring_size - &struct k3_ring_cfg elm_size + * + * RA ring element sizes in bytes.
+ */ +enum k3_ring_size { + K3_RINGACC_RING_ELSIZE_4 = 0, + K3_RINGACC_RING_ELSIZE_8, + K3_RINGACC_RING_ELSIZE_16, + K3_RINGACC_RING_ELSIZE_32, + K3_RINGACC_RING_ELSIZE_64, + K3_RINGACC_RING_ELSIZE_128, + K3_RINGACC_RING_ELSIZE_256, + K3_RINGACC_RING_ELSIZE_INVALID +}; + +struct k3_ringacc; +struct k3_ring; + +/** + * struct k3_ring_cfg - RA ring configuration structure + * + * @size: Ring size, number of elements + * @elm_size: Ring element size + * @mode: Ring operational mode + * @flags: Ring configuration flags. Possible values: + * @K3_RINGACC_RING_SHARED: when set, allows the same ring to be + * requested multiple times. Useful when the same ring is used as a Free Host + * PD ring for different flows, for example. + * Note: Locking should be done by the consumer if required + */ +struct k3_ring_cfg { + u32 size; + enum k3_ring_size elm_size; + enum k3_ring_mode mode; +#define K3_RINGACC_RING_SHARED BIT(1) + u32 flags; +}; + +#define K3_RINGACC_RING_ID_ANY (-1) + +/** + * of_k3_ringacc_get_by_phandle - find a RA by phandle property + * @np: device node + * @property: property name containing phandle on RA node + * + * Returns a pointer to the RA - struct k3_ringacc + * or -ENODEV if not found, + * or -EPROBE_DEFER if not yet registered + */ +struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, + const char *property); + +#define K3_RINGACC_RING_USE_PROXY BIT(1) + +/** + * k3_ringacc_request_ring - request ring from ringacc + * @ringacc: pointer to ringacc + * @id: ring id or K3_RINGACC_RING_ID_ANY for any general purpose ring + * @flags: + * @K3_RINGACC_RING_USE_PROXY: if set - proxy will be allocated and + * used to access ring memory. Supported only for rings in + * Message/Credentials/Queue mode. + * + * Returns a pointer to the Ring - struct k3_ring + * or NULL in case of failure. + */ +struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc, + int id, u32 flags); + +/** + * k3_ringacc_ring_reset - ring reset + * @ring: pointer to Ring + * + * Resets ring internal state ((hw)occ, (hw)idx). + */ +void k3_ringacc_ring_reset(struct k3_ring *ring); +/** + * k3_ringacc_ring_reset_dma - ring reset for DMA rings + * @ring: pointer to Ring + * @occ: ring occupancy to take into account during the reset + * + * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings + * which are read by K3 UDMA, like TX or Free Host PD rings. + */ +void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ); + +/** + * k3_ringacc_ring_free - ring free + * @ring: pointer to Ring + * + * Resets the ring and frees all allocated resources. + */ +int k3_ringacc_ring_free(struct k3_ring *ring); + +/** + * k3_ringacc_get_ring_id - Get the Ring ID + * @ring: pointer to ring + * + * Returns the Ring ID + */ +u32 k3_ringacc_get_ring_id(struct k3_ring *ring); + +/** + * k3_ringacc_get_ring_irq_num - Get the irq number for the ring + * @ring: pointer to ring + * + * Returns the interrupt number which can be used to request the interrupt + */ +int k3_ringacc_get_ring_irq_num(struct k3_ring *ring); + +/** + * k3_ringacc_ring_cfg - ring configure + * @ring: pointer to ring + * @cfg: Ring configuration parameters (see &struct k3_ring_cfg) + * + * Configures ring, including ring memory allocation. + * Returns 0 on success, errno otherwise. + */ +int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg); + +/** + * k3_ringacc_ring_get_size - get ring size + * @ring: pointer to ring + * + * Returns ring size in number of elements.
+ */ +u32 k3_ringacc_ring_get_size(struct k3_ring *ring); + +/** + * k3_ringacc_ring_get_free - get free elements + * @ring: pointer to ring + * + * Returns number of free elements in the ring. + */ +u32 k3_ringacc_ring_get_free(struct k3_ring *ring); + +/** + * k3_ringacc_ring_get_occ - get ring occupancy + * @ring: pointer to ring + * + * Returns total number of valid entries on the ring + */ +u32 k3_ringacc_ring_get_occ(struct k3_ring *ring); + +/** + * k3_ringacc_ring_is_full - checks if ring is full + * @ring: pointer to ring + * + * Returns true if the ring is full + */ +u32 k3_ringacc_ring_is_full(struct k3_ring *ring); + +/** + * k3_ringacc_ring_push - push element to the ring tail + * @ring: pointer to ring + * @elem: pointer to ring element buffer + * + * Push one ring element to the ring tail. Size of the ring element is + * determined by ring configuration &struct k3_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + */ +int k3_ringacc_ring_push(struct k3_ring *ring, void *elem); + +/** + * k3_ringacc_ring_pop - pop element from the ring head + * @ring: pointer to ring + * @elem: pointer to ring element buffer + * + * Pop one ring element from the ring head. Size of the ring element is + * determined by ring configuration &struct k3_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + */ +int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem); + +/** + * k3_ringacc_ring_push_head - push element to the ring head + * @ring: pointer to ring + * @elem: pointer to ring element buffer + * + * Push one ring element to the ring head. Size of the ring element is + * determined by ring configuration &struct k3_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + * Not supported by ring mode: K3_RINGACC_RING_MODE_RING + */ +int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem); + +/** + * k3_ringacc_ring_pop_tail - pop element from the ring tail + * @ring: pointer to ring + * @elem: pointer to ring element buffer + * + * Pop one ring element from the ring tail. Size of the ring element is + * determined by ring configuration &struct k3_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + * Not supported by ring mode: K3_RINGACC_RING_MODE_RING + */ +int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem); + +u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring); + +#endif /* __SOC_TI_K3_RINGACC_API_H_ */
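As an illustrative sketch (not part of the patch): a typical consumer looks up the RA through a phandle, requests a general-purpose ring, configures it, and then exchanges elements. The "ti,ringacc" property name and the error-handling policy are assumptions of this sketch.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/soc/ti/k3-ringacc.h>

static int my_ring_selftest(struct device_node *np)
{
	struct k3_ring_cfg cfg = {
		.size = 128,				/* 128 elements */
		.elm_size = K3_RINGACC_RING_ELSIZE_8,	/* 8 bytes each */
		.mode = K3_RINGACC_RING_MODE_RING,	/* SW direct access */
	};
	struct k3_ringacc *ra;
	struct k3_ring *ring;
	u64 elem = 0xdeadbeef;
	int ret;

	ra = of_k3_ringacc_get_by_phandle(np, "ti,ringacc");
	if (IS_ERR(ra))
		return PTR_ERR(ra);

	ring = k3_ringacc_request_ring(ra, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);
	if (ret)
		goto out_free;

	ret = k3_ringacc_ring_push(ring, &elem);	/* tail enqueue */
	if (!ret)
		ret = k3_ringacc_ring_pop(ring, &elem);	/* head dequeue */

out_free:
	k3_ringacc_ring_free(ring);
	return ret;
}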
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 98fe8663033a..6d16ba01ff5a 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -423,6 +423,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * GPIO descriptors rather than using global GPIO numbers grabbed by the * driver. This will fill in @cs_gpiods and @cs_gpios should not be used, * and SPI devices will have the cs_gpiod assigned rather than cs_gpio. + * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will + * fill in this field with the first unused native CS, to be used by SPI + * controller drivers that need to drive a native CS when using GPIO CS. + * @max_native_cs: When cs_gpiods is used, and this field is filled in, + * spi_register_controller() will validate all native CS (including the + * unused native CS) against this value. * @statistics: statistics for the spi_controller * @dma_tx: DMA transmit channel * @dma_rx: DMA receive channel @@ -624,6 +630,8 @@ struct spi_controller { int *cs_gpios; struct gpio_desc **cs_gpiods; bool use_gpio_descriptors; + u8 unused_native_cs; + u8 max_native_cs; /* statistics */ struct spi_statistics statistics; @@ -689,10 +697,10 @@ extern void spi_finalize_current_transfer(struct spi_controller *ctlr); /* Helper calls for driver to timestamp transfer */ void spi_take_timestamp_pre(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off); + size_t progress, bool irqs_off); void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off); + size_t progress, bool irqs_off); /* the spi driver core manages memory for the spi_controller classdev */ extern struct spi_controller *__spi_alloc_controller(struct device *host, diff --git a/include/linux/spi/spi_oc_tiny.h b/include/linux/spi/spi_oc_tiny.h index a3ecf2feadf2..284872ac130c 100644 --- a/include/linux/spi/spi_oc_tiny.h +++ b/include/linux/spi/spi_oc_tiny.h @@ -6,16 +6,12 @@ * struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI * @freq: input clock freq to the core. * @baudwidth: baud rate divider width of the core. - * @gpio_cs_count: number of gpio pins used for chipselect. - * @gpio_cs: array of gpio pins used for chipselect. * * freq and baudwidth are used only if the divider is programmable. */ struct tiny_spi_platform_data { unsigned int freq; unsigned int baudwidth; - unsigned int gpio_cs_count; - int *gpio_cs; }; #endif /* _LINUX_SPI_SPI_OC_TINY_H */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 6fc8843f1c9e..4a230c2f1c31 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -329,6 +329,7 @@ extern void arch_suspend_disable_irqs(void); extern void arch_suspend_enable_irqs(void); extern int pm_suspend(suspend_state_t state); +extern bool sync_on_suspend_enabled; #else /* !CONFIG_SUSPEND */ #define suspend_valid_only_mem NULL @@ -342,6 +343,7 @@ static inline bool pm_suspend_default_s2idle(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } +static inline bool sync_on_suspend_enabled(void) { return true; } static inline bool idle_should_enter_s2idle(void) { return false; } static inline void __init pm_states_init(void) {} static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {} diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h index 85ec745767bd..966146f7267a 100644 --- a/include/linux/sxgbe_platform.h +++ b/include/linux/sxgbe_platform.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * 10G controller driver for Samsung EXYNOS SoCs + * 10G controller driver for Samsung Exynos SoCs * * Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com diff --git a/include/linux/tnum.h b/include/linux/tnum.h index c17af77f3fae..ea627d1ab7e3 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -30,7 +30,7 @@ struct tnum tnum_lshift(struct tnum a, u8 shift); /* Shift (rsh) a tnum right (by a fixed shift) */ struct tnum tnum_rshift(struct tnum a, u8 shift); /* Shift (arsh) a tnum right (by a fixed min_shift) */ -struct tnum tnum_arshift(struct tnum a, u8 min_shift); +struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness); /* Add two tnums, return @a + @b */ struct tnum tnum_add(struct tnum a, struct tnum b); /* Subtract two tnums, return @a - @b */ diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 86eecbd98e84..f73e1775ded0 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -417,6 +417,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) } /** + * xa_for_each_range() - Iterate over a portion of an XArray. + * @xa: XArray. + * @index: Index of @entry. + * @entry: Entry retrieved from array. + * @start: First index to retrieve from array. + * @last: Last index to retrieve from array. + * + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. You may modify @index during the iteration if you + * want to skip or reprocess indices. It is safe to modify the array + * during the iteration. At the end of the iteration, @entry will be set + * to NULL and @index will have a value less than or equal to @last. + * + * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have + * to handle your own locking with xas_for_each(), and if you have to unlock + * after each iteration, it will also end up being O(n.log(n)). + * xa_for_each_range() will spin if it hits a retry entry; if you intend to + * see retry entries, you should use the xas_for_each() iterator instead. + * The xas_for_each() iterator will expand into more inline code than + * xa_for_each_range(). + * + * Context: Any context. Takes and releases the RCU lock. + */ +#define xa_for_each_range(xa, index, entry, start, last) \ + for (index = start, \ + entry = xa_find(xa, &index, last, XA_PRESENT); \ + entry; \ + entry = xa_find_after(xa, &index, last, XA_PRESENT)) + +/** * xa_for_each_start() - Iterate over a portion of an XArray. * @xa: XArray. * @index: Index of @entry. @@ -439,11 +469,8 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) * * Context: Any context. Takes and releases the RCU lock. */ -#define xa_for_each_start(xa, index, entry, start) \ - for (index = start, \ - entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \ - entry; \ - entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) +#define xa_for_each_start(xa, index, entry, start) \ + xa_for_each_range(xa, index, entry, start, ULONG_MAX)
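As an illustrative sketch (not part of the patch): xa_for_each_range() behaves like xa_for_each_start() with an upper bound, so a bounded walk needs no manual index check. The printout below is invented.

#include <linux/printk.h>
#include <linux/xarray.h>

/* Log every present entry with an index between 16 and 31 inclusive. */
static void dump_window(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each_range(xa, index, entry, 16, 31)
		pr_info("xa[%lu] = %p\n", index, entry);
}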
/** * xa_for_each() - Iterate over present entries in an XArray. @@ -508,6 +535,14 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) spin_lock_irqsave(&(xa)->xa_lock, flags) #define xa_unlock_irqrestore(xa, flags) \ spin_unlock_irqrestore(&(xa)->xa_lock, flags) +#define xa_lock_nested(xa, subclass) \ + spin_lock_nested(&(xa)->xa_lock, subclass) +#define xa_lock_bh_nested(xa, subclass) \ + spin_lock_bh_nested(&(xa)->xa_lock, subclass) +#define xa_lock_irq_nested(xa, subclass) \ + spin_lock_irq_nested(&(xa)->xa_lock, subclass) +#define xa_lock_irqsave_nested(xa, flags, subclass) \ + spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass) /* * Versions of the normal API which require the caller to hold the