Diffstat (limited to 'drivers/media/platform/xilinx/xilinx-dma.c')
 drivers/media/platform/xilinx/xilinx-dma.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index d9dcd4be2792..5af66c20475b 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -285,7 +285,7 @@ done:
* @dma: DMA channel that uses the buffer
*/
struct xvip_dma_buffer {
- struct vb2_buffer buf;
+ struct vb2_v4l2_buffer buf;
struct list_head queue;
struct xvip_dma *dma;
};
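
The layering behind this hunk: vb2_v4l2_buffer (introduced by the videobuf2 core/V4L2 split) embeds the core vb2_buffer as its first member, and the driver buffer embeds the wrapper in turn, so container_of() can walk from the core type out to the driver type. A minimal sketch, assuming the driver's to_xvip_dma_buffer() helper keeps its container_of() form:

    #include <linux/list.h>
    #include <media/videobuf2-v4l2.h>

    struct xvip_dma;    /* opaque here; defined elsewhere in the driver */

    struct xvip_dma_buffer {
        struct vb2_v4l2_buffer buf;  /* wrapper; buf.vb2_buf is the core buffer */
        struct list_head queue;      /* entry in the driver's queued_bufs list */
        struct xvip_dma *dma;
    };

    /* Recover the driver buffer from the wrapper (container_of over 'buf'). */
    #define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)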
@@ -301,11 +301,11 @@ static void xvip_dma_complete(void *param)
list_del(&buf->queue);
spin_unlock(&dma->queued_lock);
- buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
- buf->buf.v4l2_buf.sequence = dma->sequence++;
- v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
- vb2_set_plane_payload(&buf->buf, 0, dma->format.sizeimage);
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+ buf->buf.field = V4L2_FIELD_NONE;
+ buf->buf.sequence = dma->sequence++;
+ v4l2_get_timestamp(&buf->buf.timestamp);
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}
static int
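
For reference, the wrapper type the V4L2 metadata moved into looks roughly like this around the v4.4 split (sketch of include/media/videobuf2-v4l2.h; later kernels moved the timestamp into vb2_buffer itself), which is why field, sequence, and timestamp are now reached directly through buf->buf while payload and completion calls take the embedded vb2_buf:

    struct vb2_v4l2_buffer {
        struct vb2_buffer    vb2_buf;   /* embedded core buffer */
        __u32                flags;
        __u32                field;     /* was vb2_buffer.v4l2_buf.field */
        struct timeval       timestamp; /* filled by v4l2_get_timestamp() */
        struct v4l2_timecode timecode;
        __u32                sequence;  /* was vb2_buffer.v4l2_buf.sequence */
    };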
@@ -329,8 +329,9 @@ xvip_dma_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
- struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+ struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
buf->dma = dma;
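
The to_vb2_v4l2_buffer() helper used here is itself just a container_of() over the embedded member (from videobuf2-v4l2.h), so the two conversions in the prepare and queue ops chain from the core type out to the driver type:

    #define to_vb2_v4l2_buffer(vb) \
        container_of(vb, struct vb2_v4l2_buffer, vb2_buf)

    /* chained inside the vb2 ops, given struct vb2_buffer *vb: */
    struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);  /* core -> wrapper  */
    struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf); /* wrapper -> driver */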
@@ -339,8 +340,9 @@ static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
- struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
+ struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
struct dma_async_tx_descriptor *desc;
dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
u32 flags;
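
Memory and plane helpers such as vb2_dma_contig_plane_dma_addr() still operate on the core vb2_buffer, which is why the queue handler keeps passing vb directly; reached through the embedded member instead, the same call would look like this (hypothetical helper name, sketch only):

    #include <media/videobuf2-dma-contig.h>

    static dma_addr_t xvip_dma_buffer_addr(struct xvip_dma_buffer *buf)
    {
        /* identical to vb2_dma_contig_plane_dma_addr(vb, 0) in the op,
         * since vb == &buf->buf.vb2_buf */
        return vb2_dma_contig_plane_dma_addr(&buf->buf.vb2_buf, 0);
    }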
@@ -367,7 +369,7 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
if (!desc) {
dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
return;
}
desc->callback = xvip_dma_complete;
@@ -434,7 +436,7 @@ error:
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_QUEUED);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
list_del(&buf->queue);
}
spin_unlock_irq(&dma->queued_lock);
@@ -461,7 +463,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq)
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&buf->queue);
}
spin_unlock_irq(&dma->queued_lock);