Diffstat (limited to 'drivers/media/platform/stm32/stm32-dcmi.c')
-rw-r--r--	drivers/media/platform/stm32/stm32-dcmi.c	259
1 file changed, 132 insertions(+), 127 deletions(-)
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 2e1933d872ee..721564176d8c 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -22,7 +22,9 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/videodev2.h>
 
@@ -84,19 +86,14 @@
 enum state {
 	STOPPED = 0,
+	WAIT_FOR_BUFFER,
 	RUNNING,
-	STOPPING,
 };
 
 #define MIN_WIDTH	16U
-#define MAX_WIDTH	2048U
+#define MAX_WIDTH	2592U
 #define MIN_HEIGHT	16U
-#define MAX_HEIGHT	2048U
-
-#define MIN_JPEG_WIDTH	16U
-#define MAX_JPEG_WIDTH	2592U
-#define MIN_JPEG_HEIGHT	16U
-#define MAX_JPEG_HEIGHT	2592U
+#define MAX_HEIGHT	2592U
 
 #define TIMEOUT_MS	1000
 
@@ -194,7 +191,7 @@ static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
 	reg_write(base, reg, reg_read(base, reg) & ~mask);
 }
 
-static int dcmi_start_capture(struct stm32_dcmi *dcmi);
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf);
 
 static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
 			     struct dcmi_buf *buf,
@@ -206,6 +203,8 @@ static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
 	if (!buf)
 		return;
 
+	list_del_init(&buf->list);
+
 	vbuf = &buf->vb;
 
 	vbuf->sequence = dcmi->sequence++;
@@ -223,6 +222,8 @@
 
 static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
 {
+	struct dcmi_buf *buf;
+
 	spin_lock_irq(&dcmi->irqlock);
 
 	if (dcmi->state != RUNNING) {
@@ -232,34 +233,30 @@ static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
 
 	/* Restart a new DMA transfer with next buffer */
 	if (list_empty(&dcmi->buffers)) {
-		dev_err(dcmi->dev, "%s: No more buffer queued, cannot capture buffer\n",
-			__func__);
-		dcmi->errors_count++;
-		dcmi->active = NULL;
-
+		dev_dbg(dcmi->dev, "Capture restart is deferred to next buffer queueing\n");
+		dcmi->state = WAIT_FOR_BUFFER;
 		spin_unlock_irq(&dcmi->irqlock);
-		return -EINVAL;
+		return 0;
 	}
-
-	dcmi->active = list_entry(dcmi->buffers.next,
-				  struct dcmi_buf, list);
-	list_del_init(&dcmi->active->list);
+	buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+	dcmi->active = buf;
 
 	spin_unlock_irq(&dcmi->irqlock);
 
-	return dcmi_start_capture(dcmi);
+	return dcmi_start_capture(dcmi, buf);
 }
 
 static void dcmi_dma_callback(void *param)
 {
 	struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
-	struct dma_chan *chan = dcmi->dma_chan;
 	struct dma_tx_state state;
 	enum dma_status status;
 	struct dcmi_buf *buf = dcmi->active;
 
+	spin_lock_irq(&dcmi->irqlock);
+
 	/* Check DMA status */
-	status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
+	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
 
 	switch (status) {
 	case DMA_IN_PROGRESS:
@@ -270,6 +267,9 @@ static void dcmi_dma_callback(void *param)
 		break;
 	case DMA_ERROR:
 		dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
+
+		/* Return buffer to V4L2 in error state */
+		dcmi_buffer_done(dcmi, buf, 0, -EIO);
 		break;
 	case DMA_COMPLETE:
 		dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);
@@ -277,15 +277,19 @@
 
 		/* Return buffer to V4L2 */
 		dcmi_buffer_done(dcmi, buf, buf->size, 0);
+		spin_unlock_irq(&dcmi->irqlock);
+
 		/* Restart capture */
 		if (dcmi_restart_capture(dcmi))
 			dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
 				__func__);
-		break;
+		return;
 	default:
 		dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
 		break;
 	}
+
+	spin_unlock_irq(&dcmi->irqlock);
 }
 
 static int dcmi_start_dma(struct stm32_dcmi *dcmi,
@@ -313,10 +317,11 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
 	/* Prepare a DMA transaction */
 	desc = dmaengine_prep_slave_single(dcmi->dma_chan,
 					   buf->paddr,
 					   buf->size,
-					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+					   DMA_DEV_TO_MEM,
+					   DMA_PREP_INTERRUPT);
 	if (!desc) {
-		dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer size %zu\n",
-			__func__, buf->size);
+		dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
+			__func__, &buf->paddr, buf->size);
 		return -EINVAL;
 	}
 
@@ -336,10 +341,9 @@
 	return 0;
 }
 
-static int dcmi_start_capture(struct stm32_dcmi *dcmi)
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf)
 {
 	int ret;
-	struct dcmi_buf *buf = dcmi->active;
 
 	if (!buf)
 		return -EINVAL;
@@ -382,7 +386,6 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
 {
 	struct dma_tx_state state;
 	enum dma_status status;
-	struct dma_chan *chan = dcmi->dma_chan;
 	struct dcmi_buf *buf = dcmi->active;
 
 	if (!buf)
@@ -390,8 +393,7 @@
 	/*
 	 * Because of variable JPEG buffer size sent by sensor,
-	 * DMA transfer never completes due to transfer size
-	 * never reached.
+	 * DMA transfer never completes due to transfer size never reached.
 	 * In order to ensure that all the JPEG data are transferred
 	 * in active buffer memory, DMA is drained.
 	 * Then DMA tx status gives the amount of data transferred
@@ -400,10 +402,10 @@
 	 */
 
 	/* Drain DMA */
-	dmaengine_synchronize(chan);
+	dmaengine_synchronize(dcmi->dma_chan);
 
 	/* Get DMA residue to get JPEG size */
-	status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
+	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
 	if (status != DMA_ERROR && state.residue < buf->size) {
 		/* Return JPEG buffer to V4L2 with received JPEG buffer size */
 		dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
@@ -430,18 +432,6 @@ static irqreturn_t dcmi_irq_thread(int irq, void *arg)
 
 	spin_lock_irq(&dcmi->irqlock);
 
-	/* Stop capture is required */
-	if (dcmi->state == STOPPING) {
-		reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
-
-		dcmi->state = STOPPED;
-
-		complete(&dcmi->complete);
-
-		spin_unlock_irq(&dcmi->irqlock);
-		return IRQ_HANDLED;
-	}
-
 	if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
 		dcmi->errors_count++;
 		if (dcmi->misr & IT_OVR)
@@ -495,8 +485,6 @@ static int dcmi_queue_setup(struct vb2_queue *vq,
 	*nplanes = 1;
 	sizes[0] = size;
 
-	dcmi->active = NULL;
-
 	dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
 		*nbuffers, size);
 
@@ -554,21 +542,24 @@ static void dcmi_buf_queue(struct vb2_buffer *vb)
 
 	spin_lock_irq(&dcmi->irqlock);
 
-	if (dcmi->state == RUNNING && !dcmi->active) {
+	/* Enqueue to video buffers list */
+	list_add_tail(&buf->list, &dcmi->buffers);
+
+	if (dcmi->state == WAIT_FOR_BUFFER) {
+		dcmi->state = RUNNING;
 		dcmi->active = buf;
 		dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
 			buf->vb.vb2_buf.index);
 		spin_unlock_irq(&dcmi->irqlock);
-		if (dcmi_start_capture(dcmi))
+		if (dcmi_start_capture(dcmi, buf))
 			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
 				__func__);
-	} else {
-		/* Enqueue to video buffers list */
-		list_add_tail(&buf->list, &dcmi->buffers);
-		spin_unlock_irq(&dcmi->irqlock);
+		return;
 	}
+
+	spin_unlock_irq(&dcmi->irqlock);
 }
 
 static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -578,9 +569,9 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
 	u32 val = 0;
 	int ret;
 
-	ret = clk_enable(dcmi->mclk);
+	ret = pm_runtime_get_sync(dcmi->dev);
 	if (ret) {
-		dev_err(dcmi->dev, "%s: Failed to start streaming, cannot enable clock\n",
+		dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync\n",
 			__func__);
 		goto err_release_buffers;
 	}
@@ -590,7 +581,7 @@
 	if (ret && ret != -ENOIOCTLCMD) {
 		dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error",
 			__func__);
-		goto err_disable_clock;
+		goto err_pm_put;
 	}
 
 	spin_lock_irq(&dcmi->irqlock);
@@ -636,13 +627,10 @@
 	/* Enable dcmi */
 	reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);
 
-	dcmi->state = RUNNING;
-
 	dcmi->sequence = 0;
 	dcmi->errors_count = 0;
 	dcmi->overrun_count = 0;
 	dcmi->buffers_count = 0;
-	dcmi->active = NULL;
 
 	/*
 	 * Start transfer if at least one buffer has been queued,
@@ -650,17 +638,20 @@
 	 */
 	if (list_empty(&dcmi->buffers)) {
 		dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
+		dcmi->state = WAIT_FOR_BUFFER;
 		spin_unlock_irq(&dcmi->irqlock);
 		return 0;
 	}
 
-	dcmi->active = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
-	list_del_init(&dcmi->active->list);
+	buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+	dcmi->active = buf;
+
+	dcmi->state = RUNNING;
 
 	dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
 	spin_unlock_irq(&dcmi->irqlock);
 
-	ret = dcmi_start_capture(dcmi);
+	ret = dcmi_start_capture(dcmi, buf);
 	if (ret) {
 		dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
 			__func__);
@@ -675,8 +666,8 @@
 err_subdev_streamoff:
 	v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
 
-err_disable_clock:
-	clk_disable(dcmi->mclk);
+err_pm_put:
+	pm_runtime_put(dcmi->dev);
 
 err_release_buffers:
 	spin_lock_irq(&dcmi->irqlock);
@@ -684,15 +675,11 @@ err_release_buffers:
 	 * Return all buffers to vb2 in QUEUED state.
 	 * This will give ownership back to userspace
 	 */
-	if (dcmi->active) {
-		buf = dcmi->active;
-		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
-		dcmi->active = NULL;
-	}
 	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
 		list_del_init(&buf->list);
 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
 	}
+	dcmi->active = NULL;
 	spin_unlock_irq(&dcmi->irqlock);
 
 	return ret;
@@ -702,8 +689,6 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
 {
 	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
 	struct dcmi_buf *buf, *node;
-	unsigned long time_ms = msecs_to_jiffies(TIMEOUT_MS);
-	long timeout;
 	int ret;
 
 	/* Disable stream on the sub device */
@@ -713,13 +698,6 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
 			__func__, ret);
 
 	spin_lock_irq(&dcmi->irqlock);
-	dcmi->state = STOPPING;
-	spin_unlock_irq(&dcmi->irqlock);
-
-	timeout = wait_for_completion_interruptible_timeout(&dcmi->complete,
-							    time_ms);
-
-	spin_lock_irq(&dcmi->irqlock);
 
 	/* Disable interruptions */
 	reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
@@ -727,29 +705,21 @@
 	/* Disable DCMI */
 	reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);
 
-	if (!timeout) {
-		dev_err(dcmi->dev, "%s: Timeout during stop streaming\n",
-			__func__);
-		dcmi->state = STOPPED;
-	}
-
 	/* Return all queued buffers to vb2 in ERROR state */
-	if (dcmi->active) {
-		buf = dcmi->active;
-		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
-		dcmi->active = NULL;
-	}
 	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
 		list_del_init(&buf->list);
 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
 	}
+	dcmi->active = NULL;
+
+	dcmi->state = STOPPED;
+
 	spin_unlock_irq(&dcmi->irqlock);
 
 	/* Stop all pending DMA operations */
 	dmaengine_terminate_all(dcmi->dma_chan);
 
-	clk_disable(dcmi->mclk);
+	pm_runtime_put(dcmi->dev);
 
 	if (dcmi->errors_count)
 		dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
@@ -843,14 +813,8 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
 	}
 
 	/* Limit to hardware capabilities */
-	if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
-		pix->width = clamp(pix->width, MIN_JPEG_WIDTH, MAX_JPEG_WIDTH);
-		pix->height =
-			clamp(pix->height, MIN_JPEG_HEIGHT, MAX_JPEG_HEIGHT);
-	} else {
-		pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
-		pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
-	}
+	pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
+	pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
 
 	/* No crop if JPEG is requested */
 	do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);
@@ -1605,23 +1569,20 @@ static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
 	struct device_node *ep = NULL;
 	struct device_node *remote;
 
-	while (1) {
-		ep = of_graph_get_next_endpoint(node, ep);
-		if (!ep)
-			return -EINVAL;
+	ep = of_graph_get_next_endpoint(node, ep);
+	if (!ep)
+		return -EINVAL;
 
-		remote = of_graph_get_remote_port_parent(ep);
-		if (!remote) {
-			of_node_put(ep);
-			return -EINVAL;
-		}
+	remote = of_graph_get_remote_port_parent(ep);
+	of_node_put(ep);
+	if (!remote)
+		return -EINVAL;
 
-		/* Remote node to connect */
-		dcmi->entity.node = remote;
-		dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
-		dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
-		return 0;
-	}
+	/* Remote node to connect */
+	dcmi->entity.node = remote;
+	dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+	dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
+	return 0;
 }
 
 static int dcmi_graph_init(struct stm32_dcmi *dcmi)
@@ -1696,23 +1657,20 @@ static int dcmi_probe(struct platform_device *pdev)
 	}
 
 	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
+	of_node_put(np);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not parse the endpoint\n");
-		of_node_put(np);
 		return -ENODEV;
 	}
 
 	if (ep.bus_type == V4L2_MBUS_CSI2) {
		dev_err(&pdev->dev, "CSI bus not supported\n");
-		of_node_put(np);
 		return -ENODEV;
 	}
 
 	dcmi->bus.flags = ep.bus.parallel.flags;
 	dcmi->bus.bus_width = ep.bus.parallel.bus_width;
 	dcmi->bus.data_shift = ep.bus.parallel.data_shift;
 
-	of_node_put(np);
-
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
 		dev_err(&pdev->dev, "Could not get irq\n");
@@ -1751,12 +1709,6 @@
 		return -EPROBE_DEFER;
 	}
 
-	ret = clk_prepare(mclk);
-	if (ret) {
-		dev_err(&pdev->dev, "Unable to prepare mclk %p\n", mclk);
-		goto err_dma_release;
-	}
-
 	spin_lock_init(&dcmi->irqlock);
 	mutex_init(&dcmi->lock);
 	init_completion(&dcmi->complete);
@@ -1772,7 +1724,7 @@
 	/* Initialize the top-level structure */
 	ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
 	if (ret)
-		goto err_clk_unprepare;
+		goto err_dma_release;
 
 	dcmi->vdev = video_device_alloc();
 	if (!dcmi->vdev) {
@@ -1832,14 +1784,15 @@
 	dev_info(&pdev->dev, "Probe done\n");
 
 	platform_set_drvdata(pdev, dcmi);
+
+	pm_runtime_enable(&pdev->dev);
+
 	return 0;
 
 err_device_release:
 	video_device_release(dcmi->vdev);
 err_device_unregister:
 	v4l2_device_unregister(&dcmi->v4l2_dev);
-err_clk_unprepare:
-	clk_unprepare(dcmi->mclk);
 err_dma_release:
 	dma_release_channel(dcmi->dma_chan);
 
@@ -1850,20 +1803,72 @@ static int dcmi_remove(struct platform_device *pdev)
 {
 	struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);
 
+	pm_runtime_disable(&pdev->dev);
+
 	v4l2_async_notifier_unregister(&dcmi->notifier);
 	v4l2_device_unregister(&dcmi->v4l2_dev);
-	clk_unprepare(dcmi->mclk);
+
 	dma_release_channel(dcmi->dma_chan);
 
 	return 0;
 }
 
+static __maybe_unused int dcmi_runtime_suspend(struct device *dev)
+{
+	struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(dcmi->mclk);
+
+	return 0;
+}
+
+static __maybe_unused int dcmi_runtime_resume(struct device *dev)
+{
+	struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(dcmi->mclk);
+	if (ret)
+		dev_err(dev, "%s: Failed to prepare_enable clock\n", __func__);
+
+	return ret;
+}
+
+static __maybe_unused int dcmi_suspend(struct device *dev)
+{
+	/* disable clock */
+	pm_runtime_force_suspend(dev);
+
+	/* change pinctrl state */
+	pinctrl_pm_select_sleep_state(dev);
+
+	return 0;
+}
+
+static __maybe_unused int dcmi_resume(struct device *dev)
+{
+	/* restore pinctl default state */
+	pinctrl_pm_select_default_state(dev);
+
+	/* clock enable */
+	pm_runtime_force_resume(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops dcmi_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(dcmi_suspend, dcmi_resume)
+	SET_RUNTIME_PM_OPS(dcmi_runtime_suspend,
+			   dcmi_runtime_resume, NULL)
+};
+
 static struct platform_driver stm32_dcmi_driver = {
 	.probe		= dcmi_probe,
 	.remove		= dcmi_remove,
 	.driver		= {
 		.name = DRV_NAME,
 		.of_match_table = of_match_ptr(stm32_dcmi_of_match),
+		.pm = &dcmi_pm_ops,
 	},
 };
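
The buffer management rework above drops the STOPPING state and its completion handshake: when the queue runs dry, the driver parks itself in WAIT_FOR_BUFFER and resumes on the next buffer queueing. A condensed sketch of that handoff, with the locking and error handling elided (names as in the patch; illustrative only, not the exact driver code):

	/* DMA-complete path: with no buffer left, park instead of failing. */
	if (list_empty(&dcmi->buffers)) {
		dcmi->state = WAIT_FOR_BUFFER;	/* restart deferred, not an error */
		return 0;
	}

	/* qbuf path: enqueue first, then un-park the capture if needed. */
	list_add_tail(&buf->list, &dcmi->buffers);
	if (dcmi->state == WAIT_FOR_BUFFER) {
		dcmi->state = RUNNING;
		dcmi->active = buf;
		dcmi_start_capture(dcmi, buf);
	}

Because dcmi_buffer_done() now performs the list_del_init(), a buffer leaves dcmi->buffers only when it is handed back to vb2, which is what lets the error paths and dcmi_stop_streaming() release everything with a single list walk.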
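For JPEG capture the frame size is only known after the fact, so the DMA transfer stays sized for the worst case and the received byte count is derived from the DMA residue. The core of the pattern used in dcmi_process_jpeg() above, reduced to a sketch (jpeg_size is a hypothetical local added here for illustration):

	struct dma_tx_state state;
	enum dma_status status;
	size_t jpeg_size = 0;	/* hypothetical, for illustration only */

	/* Drain the channel so all in-flight data reach the buffer */
	dmaengine_synchronize(dcmi->dma_chan);

	/* residue = bytes NOT transferred; the difference is the JPEG size */
	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
	if (status != DMA_ERROR && state.residue < buf->size)
		jpeg_size = buf->size - state.residue;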
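The clk_prepare()/clk_enable() pairs are folded into runtime PM: pm_runtime_get_sync() at stream-on lands in dcmi_runtime_resume() (clk_prepare_enable), and pm_runtime_put() at stream-off lands in dcmi_runtime_suspend() (clk_disable_unprepare). One caveat when reading the error check: pm_runtime_get_sync() returns 1, not 0, when the device was already active, so the common consumer pattern tests for negative values only — roughly this sketch:

	int ret;

	ret = pm_runtime_get_sync(dcmi->dev);	/* resumes device, enables mclk */
	if (ret < 0) {
		/* the usage count is raised even on failure; drop it */
		pm_runtime_put_noidle(dcmi->dev);
		return ret;
	}

	/* ... streaming ... */

	pm_runtime_put(dcmi->dev);	/* last put gates mclk again */

The patch's `if (ret)` check would also treat that positive return as a failure, which is worth keeping in mind when reading the stream-on error path.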
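System sleep reuses the same callbacks through pm_runtime_force_suspend()/pm_runtime_force_resume(), with the DCMI pins moved to their pinctrl "sleep" state in between, and the probe/remove ordering follows the usual runtime PM pattern. A minimal sketch of how the pieces fit together (function names from the patch; bodies condensed, comments added for illustration):

	static int dcmi_probe(struct platform_device *pdev)
	{
		/* ... map registers, request irq, register video device ... */
		platform_set_drvdata(pdev, dcmi);
		/* enable runtime PM last, once the device is fully set up */
		pm_runtime_enable(&pdev->dev);
		return 0;
	}

	static int dcmi_remove(struct platform_device *pdev)
	{
		/* disable runtime PM first, so no resume races the teardown */
		pm_runtime_disable(&pdev->dev);
		/* ... unregister and release resources ... */
		return 0;
	}

Since no .runtime_idle handler is passed to SET_RUNTIME_PM_OPS(), the device is allowed to suspend, and mclk to stop, as soon as the last pm_runtime_put() drops the usage count to zero.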