Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--	include/linux/dmaengine.h	145
1 file changed, 138 insertions(+), 7 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c47c68e535e8..16a1cad30c33 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -607,11 +607,38 @@ enum dmaengine_alignment {
 };
 
 /**
+ * struct dma_slave_map - associates a slave device and its slave channel with
+ * a parameter to be used by a filter function
+ * @devname: name of the device
+ * @slave: slave channel name
+ * @param: opaque parameter to pass to struct dma_filter.fn
+ */
+struct dma_slave_map {
+	const char *devname;
+	const char *slave;
+	void *param;
+};
+
+/**
+ * struct dma_filter - information for slave device/channel to filter_fn/param
+ * mapping
+ * @fn: filter function callback
+ * @mapcnt: number of slave device/channel entries in the map
+ * @map: array of channel-to-filter mapping data
+ */
+struct dma_filter {
+	dma_filter_fn fn;
+	int mapcnt;
+	const struct dma_slave_map *map;
+};
+
+/**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
+ * @filter: information for device/slave to filter function/param mapping
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
@@ -654,11 +681,14 @@ enum dmaengine_alignment {
  *	paused. Returns 0 or an error code
  * @device_terminate_all: Aborts all transfers on a channel. Returns 0
  *	or an error code
+ * @device_synchronize: Synchronizes the termination of transfers to the
+ *	current context.
  * @device_tx_status: poll for transaction completion, the optional
  *	txstate parameter can be supplied with a pointer to get a
  *	struct with auxiliary transfer status information, otherwise the call
  *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
  */
 struct dma_device {
@@ -666,6 +696,7 @@ struct dma_device {
 	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
+	struct dma_filter filter;
 	dma_cap_mask_t  cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
@@ -681,6 +712,7 @@ struct dma_device {
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
 	u32 directions;
+	bool descriptor_reuse;
 	enum dma_residue_granularity residue_granularity;
 
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
@@ -737,6 +769,7 @@ struct dma_device {
 	int (*device_pause)(struct dma_chan *chan);
 	int (*device_resume)(struct dma_chan *chan);
 	int (*device_terminate_all)(struct dma_chan *chan);
+	void (*device_synchronize)(struct dma_chan *chan);
 
 	enum dma_status (*device_tx_status)(struct dma_chan *chan,
 					    dma_cookie_t cookie,
@@ -828,6 +861,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
 			src_sg, src_nents, flags);
 }
 
+/**
+ * dmaengine_terminate_all() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * This function is DEPRECATED. Use either dmaengine_terminate_sync() or
+ * dmaengine_terminate_async() instead.
+ */
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	if (chan->device->device_terminate_all)
@@ -836,6 +876,88 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
 	return -ENOSYS;
 }
 
+/**
+ * dmaengine_terminate_async() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * Calling this function will terminate all active and pending descriptors
+ * that have previously been submitted to the channel. It is not guaranteed,
+ * though, that the transfer for the active descriptor has stopped when the
+ * function returns. Furthermore it is possible that the complete callback of
+ * a submitted transfer is still running when this function returns.
+ *
+ * dmaengine_synchronize() needs to be called before it is safe to free
+ * any memory that is accessed by previously submitted descriptors or before
+ * freeing any resources accessed from within the completion callback of any
+ * previously submitted descriptors.
+ *
+ * This function can be called from atomic context as well as from within a
+ * complete callback of a descriptor submitted on the same channel.
+ *
+ * If none of the two conditions above apply consider using
+ * dmaengine_terminate_sync() instead.
+ */
+static inline int dmaengine_terminate_async(struct dma_chan *chan)
+{
+	if (chan->device->device_terminate_all)
+		return chan->device->device_terminate_all(chan);
+
+	return -EINVAL;
+}
+
+/**
+ * dmaengine_synchronize() - Synchronize DMA channel termination
+ * @chan: The channel to synchronize
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe
+ * to free resources accessed from within the complete callbacks.
+ *
+ * The behavior of this function is undefined if dma_async_issue_pending() has
+ * been called between dmaengine_terminate_async() and this function.
+ *
+ * This function must only be called from non-atomic context and must not be
+ * called from within a complete callback of a descriptor submitted on the same
+ * channel.
+ */
+static inline void dmaengine_synchronize(struct dma_chan *chan)
+{
+	might_sleep();
+
+	if (chan->device->device_synchronize)
+		chan->device->device_synchronize(chan);
+}
+
+/**
+ * dmaengine_terminate_sync() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * Calling this function will terminate all active and pending transfers
+ * that have previously been submitted to the channel. It is similar to
+ * dmaengine_terminate_async() but guarantees that the DMA transfer has
+ * actually stopped and that all complete callbacks have finished running
+ * when the function returns.
+ *
+ * This function must only be called from non-atomic context and must not be
+ * called from within a complete callback of a descriptor submitted on the same
+ * channel.
+ */
+static inline int dmaengine_terminate_sync(struct dma_chan *chan)
+{
+	int ret;
+
+	ret = dmaengine_terminate_async(chan);
+	if (ret)
+		return ret;
+
+	dmaengine_synchronize(chan);
+
+	return 0;
+}
+
 static inline int dmaengine_pause(struct dma_chan *chan)
 {
 	if (chan->device->device_pause)
@@ -1140,9 +1262,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 					dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+
 void dma_release_channel(struct dma_chan *chan);
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
@@ -1166,16 +1290,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	return NULL;
 }
-static inline struct dma_chan *dma_request_slave_channel_reason(
-					struct device *dev, const char *name)
-{
-	return ERR_PTR(-ENODEV);
-}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							 const char *name)
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_chan(struct device *dev,
+						const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_chan_by_mask(
+						const dma_cap_mask_t *mask)
+{
+	return ERR_PTR(-ENODEV);
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
@@ -1186,6 +1315,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 }
 #endif
 
+#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
+
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
 	struct dma_slave_caps caps;
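
Provider-side usage: struct dma_slave_map and struct dma_filter let a DMA controller driver on a platform without DT/ACPI describe a device/channel-to-filter-parameter table once, instead of every client having to know the controller's filter function. The sketch below is hypothetical and not part of this patch; foo_dma_filter(), the channel numbers, and the "foo-uart.0" device name are invented for illustration.

	#include <linux/kernel.h>
	#include <linux/dmaengine.h>

	#define FOO_CH_UART_RX	4	/* hypothetical controller channel numbers */
	#define FOO_CH_UART_TX	5

	static bool foo_dma_filter(struct dma_chan *chan, void *param);

	static const struct dma_slave_map foo_slave_map[] = {
		{ "foo-uart.0", "rx", (void *)(unsigned long)FOO_CH_UART_RX },
		{ "foo-uart.0", "tx", (void *)(unsigned long)FOO_CH_UART_TX },
	};

	static void foo_dma_set_filter(struct dma_device *dma_dev)
	{
		/* Filled in before registering the DMA device, so that
		 * channel requests can fall back to this table when no
		 * DT/ACPI mapping exists for the requesting device. */
		dma_dev->filter.fn = foo_dma_filter;
		dma_dev->filter.map = foo_slave_map;
		dma_dev->filter.mapcnt = ARRAY_SIZE(foo_slave_map);
	}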
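Consumer-side usage of the split termination API: dmaengine_terminate_async() may be called from atomic context or from a completion callback, but buffers and callback resources may only be freed after dmaengine_synchronize() has run in process context. A minimal, hypothetical sketch (the foo_* names and the coherent buffer are assumptions, not part of the patch):

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	struct foo_dev {
		struct device *dev;
		struct dma_chan *chan;
		void *buf;
		dma_addr_t buf_dma;
		size_t buf_len;
	};

	static void foo_stop(struct foo_dev *foo)	/* may run in IRQ context */
	{
		/* Tells the hardware to stop; the active transfer and its
		 * completion callback may still be in flight on return. */
		dmaengine_terminate_async(foo->chan);
	}

	static void foo_teardown(struct foo_dev *foo)	/* process context only */
	{
		/* Wait until all transfers and completion callbacks for
		 * this channel have finished ... */
		dmaengine_synchronize(foo->chan);

		/* ... only now is it safe to free memory the DMA accessed. */
		dma_free_coherent(foo->dev, foo->buf_len, foo->buf, foo->buf_dma);
		dma_release_channel(foo->chan);
	}

When both steps happen in the same process-context path, dmaengine_terminate_sync() above collapses them into a single call.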
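The new dma_request_chan() differs from dma_request_slave_channel() in that it reports why acquisition failed: it returns ERR_PTR() codes such as -EPROBE_DEFER instead of NULL, which is also why the legacy dma_request_slave_channel_reason() can become a plain alias for it. A hypothetical probe-path sketch (the "rx" channel name is an assumption; it would come from the device's DT/ACPI/board mapping):

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static int foo_request_dma(struct device *dev, struct dma_chan **chan)
	{
		*chan = dma_request_chan(dev, "rx");
		if (IS_ERR(*chan))
			/* Propagating the code lets -EPROBE_DEFER retry probe. */
			return PTR_ERR(*chan);

		return 0;
	}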
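Finally, the new descriptor_reuse capability pairs with dmaengine_desc_set_reuse(), whose beginning is visible at the end of the diff: a client may only mark a descriptor reusable when the channel advertises support. A short hypothetical sketch (foo_try_reuse() is invented; desc stands for a previously prepared descriptor):

	#include <linux/printk.h>
	#include <linux/dmaengine.h>

	static void foo_try_reuse(struct dma_async_tx_descriptor *desc)
	{
		/* The helper sets DMA_CTRL_REUSE only when the channel's
		 * slave caps report descriptor_reuse; otherwise it returns
		 * an error and the descriptor must be re-prepared for every
		 * transfer. */
		if (dmaengine_desc_set_reuse(desc))
			pr_debug("foo: descriptor reuse not supported\n");
	}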
