diff options
| author | Paul Cercueil <paul@crapouillou.net> | 2024-04-19 11:25:37 +0300 | 
|---|---|---|
| committer | Jonathan Cameron <Jonathan.Cameron@huawei.com> | 2024-04-20 17:41:14 +0300 | 
| commit | c1b91566580c245cf1147745d174be5e059ace6b (patch) | |
| tree | 4ad5fc3113e061bfcaac3bbbfdb11406ef8bdd4f /drivers/iio/buffer/industrialio-buffer-dmaengine.c | |
| parent | fb09febafd160b7aefd9e61f710a0c50f0472403 (diff) | |
| download | linux-c1b91566580c245cf1147745d174be5e059ace6b.tar.xz | |
iio: buffer-dmaengine: Support specifying buffer direction
Update the devm_iio_dmaengine_buffer_setup() function to support
specifying the buffer direction.
Update the iio_dmaengine_buffer_submit() function to handle input
buffers as well as output buffers.
Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-4-5ca45b4de294@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Diffstat (limited to 'drivers/iio/buffer/industrialio-buffer-dmaengine.c')
| -rw-r--r-- | drivers/iio/buffer/industrialio-buffer-dmaengine.c | 44 | 
1 file changed, 30 insertions, 14 deletions
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index df05d66afff9..951012651018 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -64,14 +64,25 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,  	struct dmaengine_buffer *dmaengine_buffer =  		iio_buffer_to_dmaengine_buffer(&queue->buffer);  	struct dma_async_tx_descriptor *desc; +	enum dma_transfer_direction dma_dir; +	size_t max_size;  	dma_cookie_t cookie; -	block->bytes_used = min(block->size, dmaengine_buffer->max_size); -	block->bytes_used = round_down(block->bytes_used, -			dmaengine_buffer->align); +	max_size = min(block->size, dmaengine_buffer->max_size); +	max_size = round_down(max_size, dmaengine_buffer->align); + +	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) { +		block->bytes_used = max_size; +		dma_dir = DMA_DEV_TO_MEM; +	} else { +		dma_dir = DMA_MEM_TO_DEV; +	} + +	if (!block->bytes_used || block->bytes_used > max_size) +		return -EINVAL;  	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan, -		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM, +		block->phys_addr, block->bytes_used, dma_dir,  		DMA_PREP_INTERRUPT);  	if (!desc)  		return -ENOMEM; @@ -229,9 +240,10 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)  }  EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER); -struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev, -					      struct iio_dev *indio_dev, -					      const char *channel) +struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev, +						  struct iio_dev *indio_dev, +						  const char *channel, +						  enum iio_buffer_direction dir)  {  	struct iio_buffer *buffer;  	int ret; @@ -242,6 +254,8 @@ struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,  	indio_dev->modes |= INDIO_BUFFER_HARDWARE; +	buffer->direction = dir; 
+  	ret = iio_device_attach_buffer(indio_dev, buffer);  	if (ret) {  		iio_dmaengine_buffer_free(buffer); @@ -250,7 +264,7 @@ struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,  	return buffer;  } -EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER); +EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);  static void __devm_iio_dmaengine_buffer_free(void *buffer)  { @@ -258,30 +272,32 @@ static void __devm_iio_dmaengine_buffer_free(void *buffer)  }  /** - * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device + * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device   * @dev: Parent device for the buffer   * @indio_dev: IIO device to which to attach this buffer.   * @channel: DMA channel name, typically "rx". + * @dir: Direction of buffer (in or out)   *   * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()   * and attaches it to an IIO device with iio_device_attach_buffer().   * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the   * IIO device.   */ -int devm_iio_dmaengine_buffer_setup(struct device *dev, -				    struct iio_dev *indio_dev, -				    const char *channel) +int devm_iio_dmaengine_buffer_setup_ext(struct device *dev, +					struct iio_dev *indio_dev, +					const char *channel, +					enum iio_buffer_direction dir)  {  	struct iio_buffer *buffer; -	buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel); +	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);  	if (IS_ERR(buffer))  		return PTR_ERR(buffer);  	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,  					buffer);  } -EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER); +EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);  MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");  MODULE_DESCRIPTION("DMA buffer for the IIO framework");  | 
