Diffstat (limited to 'drivers/media/platform/mediatek')
18 files changed, 1339 insertions, 188 deletions
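Note on the multi-core scheduling added by the JPEG changes below: each encode/decode core becomes its own child platform device (mtk-jpegenc-hw / mtk-jpegdec-hw), and the per-context worker claims an idle core under jpeg->hw_lock, waits on enc_hw_wq/dec_hw_wq with a bounded retry count when every core is busy, and the IRQ/timeout paths release the core and wake the waiters. The sketch below shows only that claim/retry/release pattern, mirroring retry_select in mtk_jpegenc_worker()/mtk_jpegdec_worker(); the names here (jpeg_master, NUM_HW, MAX_RETRY, claim_hw, release_hw) are illustrative placeholders, not symbols from this patch.

    /*
     * Minimal sketch of the idle/busy core arbitration used by the
     * multi-core worker. All identifiers are hypothetical.
     */
    #include <linux/spinlock.h>
    #include <linux/wait.h>
    #include <linux/atomic.h>
    #include <linux/errno.h>

    #define NUM_HW    2
    #define MAX_RETRY 5000

    enum hw_state { HW_IDLE, HW_BUSY };

    struct jpeg_master {
            spinlock_t hw_lock;             /* protects state[] */
            enum hw_state state[NUM_HW];
            atomic_t hw_rdy;                /* number of idle cores */
            wait_queue_head_t hw_wq;        /* woken when a core goes idle */
    };

    /* Try to claim an idle core; return its index or -1 if all are busy. */
    static int claim_hw(struct jpeg_master *m)
    {
            unsigned long flags;
            int i, hw = -1;

            spin_lock_irqsave(&m->hw_lock, flags);
            for (i = 0; i < NUM_HW; i++) {
                    if (m->state[i] == HW_IDLE) {
                            m->state[i] = HW_BUSY;
                            hw = i;
                            break;
                    }
            }
            spin_unlock_irqrestore(&m->hw_lock, flags);
            return hw;
    }

    /* Release a core and wake any worker parked in the retry loop. */
    static void release_hw(struct jpeg_master *m, int hw)
    {
            unsigned long flags;

            spin_lock_irqsave(&m->hw_lock, flags);
            m->state[hw] = HW_IDLE;
            spin_unlock_irqrestore(&m->hw_lock, flags);
            atomic_inc(&m->hw_rdy);
            wake_up(&m->hw_wq);
    }

    /* Worker-side retry loop, as in retry_select in the patch. */
    static int claim_hw_retry(struct jpeg_master *m)
    {
            int hw, tries = 0;

            while ((hw = claim_hw(m)) < 0) {
                    if (wait_event_interruptible(m->hw_wq,
                                                 atomic_read(&m->hw_rdy) > 0) ||
                        ++tries > MAX_RETRY)
                            return -EBUSY;  /* all cores stayed busy */
            }
            atomic_dec(&m->hw_rdy);
            return hw;
    }

In the driver itself, completion order is restored separately: each destination buffer carries a frame_num, and the IRQ handler threads finished buffers onto a per-context dst_done_queue that is drained in last_done_frame_num order under done_queue_lock.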
diff --git a/drivers/media/platform/mediatek/jpeg/Makefile b/drivers/media/platform/mediatek/jpeg/Makefile index 76c33aad0f3f..26e84852523e 100644 --- a/drivers/media/platform/mediatek/jpeg/Makefile +++ b/drivers/media/platform/mediatek/jpeg/Makefile @@ -1,6 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only -mtk_jpeg-objs := mtk_jpeg_core.o \ - mtk_jpeg_dec_hw.o \ - mtk_jpeg_dec_parse.o \ - mtk_jpeg_enc_hw.o -obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk_jpeg.o +obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk_jpeg.o \ + mtk-jpeg-enc-hw.o \ + mtk-jpeg-dec-hw.o + +mtk_jpeg-y := mtk_jpeg_core.o \ + mtk_jpeg_dec_parse.o + +mtk-jpeg-enc-hw-y := mtk_jpeg_enc_hw.o +mtk-jpeg-dec-hw-y := mtk_jpeg_dec_hw.o diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c index 3071b61946c3..969516a940ba 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c @@ -104,11 +104,11 @@ static struct mtk_jpeg_fmt mtk_jpeg_dec_formats[] = { #define MTK_JPEG_ENC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_enc_formats) #define MTK_JPEG_DEC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_dec_formats) +#define MTK_JPEG_MAX_RETRY_TIME 5000 -struct mtk_jpeg_src_buf { - struct vb2_v4l2_buffer b; - struct list_head list; - struct mtk_jpeg_dec_param dec_param; +enum { + MTK_JPEG_BUF_FLAGS_INIT = 0, + MTK_JPEG_BUF_FLAGS_LAST_FRAME = 1, }; static int debug; @@ -586,6 +586,31 @@ static int mtk_jpeg_enc_s_selection(struct file *file, void *priv, return 0; } +static int mtk_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) +{ + struct v4l2_fh *fh = file->private_data; + struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv); + struct vb2_queue *vq; + struct vb2_buffer *vb; + struct mtk_jpeg_src_buf *jpeg_src_buf; + + if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + goto end; + + vq = v4l2_m2m_get_vq(fh->m2m_ctx, buf->type); + if (buf->index >= vq->num_buffers) { + dev_err(ctx->jpeg->dev, "buffer index out of range\n"); + return -EINVAL; + } + + vb = vq->bufs[buf->index]; + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(vb); + jpeg_src_buf->bs_size = buf->m.planes[0].bytesused; + +end: + return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); +} + static const struct v4l2_ioctl_ops mtk_jpeg_enc_ioctl_ops = { .vidioc_querycap = mtk_jpeg_querycap, .vidioc_enum_fmt_vid_cap = mtk_jpeg_enum_fmt_vid_cap, @@ -611,6 +636,9 @@ static const struct v4l2_ioctl_ops mtk_jpeg_enc_ioctl_ops = { .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + + .vidioc_encoder_cmd = v4l2_m2m_ioctl_encoder_cmd, + .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd, }; static const struct v4l2_ioctl_ops mtk_jpeg_dec_ioctl_ops = { @@ -623,7 +651,7 @@ static const struct v4l2_ioctl_ops mtk_jpeg_dec_ioctl_ops = { .vidioc_g_fmt_vid_out_mplane = mtk_jpeg_g_fmt_vid_mplane, .vidioc_s_fmt_vid_cap_mplane = mtk_jpeg_s_fmt_vid_cap_mplane, .vidioc_s_fmt_vid_out_mplane = mtk_jpeg_s_fmt_vid_out_mplane, - .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, + .vidioc_qbuf = mtk_jpeg_qbuf, .vidioc_subscribe_event = mtk_jpeg_subscribe_event, .vidioc_g_selection = mtk_jpeg_dec_g_selection, @@ -637,6 +665,9 @@ static const struct v4l2_ioctl_ops mtk_jpeg_dec_ioctl_ops = { .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + + .vidioc_decoder_cmd = v4l2_m2m_ioctl_decoder_cmd, + .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd, }; static int mtk_jpeg_queue_setup(struct vb2_queue *q, @@ -678,7 +709,7 @@ 
static int mtk_jpeg_buf_prepare(struct vb2_buffer *vb) { struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct mtk_jpeg_q_data *q_data = NULL; - struct v4l2_plane_pix_format plane_fmt; + struct v4l2_plane_pix_format plane_fmt = {}; int i; q_data = mtk_jpeg_get_q_data(ctx, vb->vb2_queue->type); @@ -905,6 +936,148 @@ static int mtk_jpeg_set_dec_dst(struct mtk_jpeg_ctx *ctx, return 0; } +static int mtk_jpegenc_get_hw(struct mtk_jpeg_ctx *ctx) +{ + struct mtk_jpegenc_comp_dev *comp_jpeg; + struct mtk_jpeg_dev *jpeg = ctx->jpeg; + unsigned long flags; + int hw_id = -1; + int i; + + spin_lock_irqsave(&jpeg->hw_lock, flags); + for (i = 0; i < MTK_JPEGENC_HW_MAX; i++) { + comp_jpeg = jpeg->enc_hw_dev[i]; + if (comp_jpeg->hw_state == MTK_JPEG_HW_IDLE) { + hw_id = i; + comp_jpeg->hw_state = MTK_JPEG_HW_BUSY; + break; + } + } + spin_unlock_irqrestore(&jpeg->hw_lock, flags); + + return hw_id; +} + +static int mtk_jpegenc_set_hw_param(struct mtk_jpeg_ctx *ctx, + int hw_id, + struct vb2_v4l2_buffer *src_buf, + struct vb2_v4l2_buffer *dst_buf) +{ + struct mtk_jpegenc_comp_dev *jpeg = ctx->jpeg->enc_hw_dev[hw_id]; + + jpeg->hw_param.curr_ctx = ctx; + jpeg->hw_param.src_buffer = src_buf; + jpeg->hw_param.dst_buffer = dst_buf; + + return 0; +} + +static int mtk_jpegenc_put_hw(struct mtk_jpeg_dev *jpeg, int hw_id) +{ + unsigned long flags; + + spin_lock_irqsave(&jpeg->hw_lock, flags); + jpeg->enc_hw_dev[hw_id]->hw_state = MTK_JPEG_HW_IDLE; + spin_unlock_irqrestore(&jpeg->hw_lock, flags); + + return 0; +} + +static void mtk_jpegenc_worker(struct work_struct *work) +{ + struct mtk_jpegenc_comp_dev *comp_jpeg[MTK_JPEGENC_HW_MAX]; + enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; + struct mtk_jpeg_src_buf *jpeg_dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; + int ret, i, hw_id = 0; + unsigned long flags; + + struct mtk_jpeg_ctx *ctx = container_of(work, + struct mtk_jpeg_ctx, + jpeg_work); + struct mtk_jpeg_dev *jpeg = ctx->jpeg; + + for (i = 0; i < MTK_JPEGENC_HW_MAX; i++) + comp_jpeg[i] = jpeg->enc_hw_dev[i]; + i = 0; + +retry_select: + hw_id = mtk_jpegenc_get_hw(ctx); + if (hw_id < 0) { + ret = wait_event_interruptible(jpeg->enc_hw_wq, + atomic_read(&jpeg->enchw_rdy) > 0); + if (ret != 0 || (i++ > MTK_JPEG_MAX_RETRY_TIME)) { + dev_err(jpeg->dev, "%s : %d, all HW are busy\n", + __func__, __LINE__); + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); + return; + } + + goto retry_select; + } + + atomic_dec(&jpeg->enchw_rdy); + src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + if (!src_buf) + goto getbuf_fail; + + dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); + if (!dst_buf) + goto getbuf_fail; + + v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); + v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true); + + mtk_jpegenc_set_hw_param(ctx, hw_id, src_buf, dst_buf); + ret = pm_runtime_get_sync(comp_jpeg[hw_id]->dev); + if (ret < 0) { + dev_err(jpeg->dev, "%s : %d, pm_runtime_get_sync fail !!!\n", + __func__, __LINE__); + goto enc_end; + } + + ret = clk_prepare_enable(comp_jpeg[hw_id]->venc_clk.clks->clk); + if (ret) { + dev_err(jpeg->dev, "%s : %d, jpegenc clk_prepare_enable fail\n", + __func__, __LINE__); + goto enc_end; + } + + schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work, + msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC)); + + spin_lock_irqsave(&comp_jpeg[hw_id]->hw_lock, flags); + jpeg_dst_buf = mtk_jpeg_vb2_to_srcbuf(&dst_buf->vb2_buf); + jpeg_dst_buf->curr_ctx = ctx; + jpeg_dst_buf->frame_num = ctx->total_frame_num; + 
ctx->total_frame_num++; + mtk_jpeg_enc_reset(comp_jpeg[hw_id]->reg_base); + mtk_jpeg_set_enc_dst(ctx, + comp_jpeg[hw_id]->reg_base, + &dst_buf->vb2_buf); + mtk_jpeg_set_enc_src(ctx, + comp_jpeg[hw_id]->reg_base, + &src_buf->vb2_buf); + mtk_jpeg_set_enc_params(ctx, comp_jpeg[hw_id]->reg_base); + mtk_jpeg_enc_start(comp_jpeg[hw_id]->reg_base); + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); + spin_unlock_irqrestore(&comp_jpeg[hw_id]->hw_lock, flags); + + return; + +enc_end: + v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); + v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + v4l2_m2m_buf_done(src_buf, buf_state); + v4l2_m2m_buf_done(dst_buf, buf_state); +getbuf_fail: + atomic_inc(&jpeg->enchw_rdy); + mtk_jpegenc_put_hw(jpeg, hw_id); + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); +} + static void mtk_jpeg_enc_device_run(void *priv) { struct mtk_jpeg_ctx *ctx = priv; @@ -922,7 +1095,7 @@ static void mtk_jpeg_enc_device_run(void *priv) goto enc_end; schedule_delayed_work(&jpeg->job_timeout_work, - msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC)); + msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC)); spin_lock_irqsave(&jpeg->hw_lock, flags); @@ -947,6 +1120,189 @@ enc_end: v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); } +static void mtk_jpeg_multicore_enc_device_run(void *priv) +{ + struct mtk_jpeg_ctx *ctx = priv; + struct mtk_jpeg_dev *jpeg = ctx->jpeg; + + queue_work(jpeg->workqueue, &ctx->jpeg_work); +} + +static int mtk_jpegdec_get_hw(struct mtk_jpeg_ctx *ctx) +{ + struct mtk_jpegdec_comp_dev *comp_jpeg; + struct mtk_jpeg_dev *jpeg = ctx->jpeg; + unsigned long flags; + int hw_id = -1; + int i; + + spin_lock_irqsave(&jpeg->hw_lock, flags); + for (i = 0; i < MTK_JPEGDEC_HW_MAX; i++) { + comp_jpeg = jpeg->dec_hw_dev[i]; + if (comp_jpeg->hw_state == MTK_JPEG_HW_IDLE) { + hw_id = i; + comp_jpeg->hw_state = MTK_JPEG_HW_BUSY; + break; + } + } + spin_unlock_irqrestore(&jpeg->hw_lock, flags); + + return hw_id; +} + +static int mtk_jpegdec_put_hw(struct mtk_jpeg_dev *jpeg, int hw_id) +{ + unsigned long flags; + + spin_lock_irqsave(&jpeg->hw_lock, flags); + jpeg->dec_hw_dev[hw_id]->hw_state = + MTK_JPEG_HW_IDLE; + spin_unlock_irqrestore(&jpeg->hw_lock, flags); + + return 0; +} + +static int mtk_jpegdec_set_hw_param(struct mtk_jpeg_ctx *ctx, + int hw_id, + struct vb2_v4l2_buffer *src_buf, + struct vb2_v4l2_buffer *dst_buf) +{ + struct mtk_jpegdec_comp_dev *jpeg = + ctx->jpeg->dec_hw_dev[hw_id]; + + jpeg->hw_param.curr_ctx = ctx; + jpeg->hw_param.src_buffer = src_buf; + jpeg->hw_param.dst_buffer = dst_buf; + + return 0; +} + +static void mtk_jpegdec_worker(struct work_struct *work) +{ + struct mtk_jpeg_ctx *ctx = container_of(work, struct mtk_jpeg_ctx, + jpeg_work); + struct mtk_jpegdec_comp_dev *comp_jpeg[MTK_JPEGDEC_HW_MAX]; + enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; + struct mtk_jpeg_src_buf *jpeg_src_buf, *jpeg_dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; + struct mtk_jpeg_dev *jpeg = ctx->jpeg; + int ret, i, hw_id = 0; + struct mtk_jpeg_bs bs; + struct mtk_jpeg_fb fb; + unsigned long flags; + + for (i = 0; i < MTK_JPEGDEC_HW_MAX; i++) + comp_jpeg[i] = jpeg->dec_hw_dev[i]; + i = 0; + +retry_select: + hw_id = mtk_jpegdec_get_hw(ctx); + if (hw_id < 0) { + ret = wait_event_interruptible_timeout(jpeg->dec_hw_wq, + atomic_read(&jpeg->dechw_rdy) > 0, + MTK_JPEG_HW_TIMEOUT_MSEC); + if (ret != 0 || (i++ > MTK_JPEG_MAX_RETRY_TIME)) { + dev_err(jpeg->dev, "%s : %d, all HW are busy\n", + __func__, __LINE__); + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); + return; + } + + goto 
retry_select; + } + + atomic_dec(&jpeg->dechw_rdy); + src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + if (!src_buf) + goto getbuf_fail; + + dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); + if (!dst_buf) + goto getbuf_fail; + + v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); + v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true); + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf); + jpeg_dst_buf = mtk_jpeg_vb2_to_srcbuf(&dst_buf->vb2_buf); + + if (mtk_jpeg_check_resolution_change(ctx, + &jpeg_src_buf->dec_param)) { + mtk_jpeg_queue_src_chg_event(ctx); + ctx->state = MTK_JPEG_SOURCE_CHANGE; + goto dec_end; + } + + jpeg_src_buf->curr_ctx = ctx; + jpeg_src_buf->frame_num = ctx->total_frame_num; + jpeg_dst_buf->curr_ctx = ctx; + jpeg_dst_buf->frame_num = ctx->total_frame_num; + + mtk_jpegdec_set_hw_param(ctx, hw_id, src_buf, dst_buf); + ret = pm_runtime_get_sync(comp_jpeg[hw_id]->dev); + if (ret < 0) { + dev_err(jpeg->dev, "%s : %d, pm_runtime_get_sync fail !!!\n", + __func__, __LINE__); + goto dec_end; + } + + ret = clk_prepare_enable(comp_jpeg[hw_id]->jdec_clk.clks->clk); + if (ret) { + dev_err(jpeg->dev, "%s : %d, jpegdec clk_prepare_enable fail\n", + __func__, __LINE__); + goto clk_end; + } + + schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work, + msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC)); + + mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs); + if (mtk_jpeg_set_dec_dst(ctx, + &jpeg_src_buf->dec_param, + &dst_buf->vb2_buf, &fb)) { + dev_err(jpeg->dev, "%s : %d, mtk_jpeg_set_dec_dst fail\n", + __func__, __LINE__); + goto setdst_end; + } + + spin_lock_irqsave(&comp_jpeg[hw_id]->hw_lock, flags); + ctx->total_frame_num++; + mtk_jpeg_dec_reset(comp_jpeg[hw_id]->reg_base); + mtk_jpeg_dec_set_config(comp_jpeg[hw_id]->reg_base, + &jpeg_src_buf->dec_param, + jpeg_src_buf->bs_size, + &bs, + &fb); + mtk_jpeg_dec_start(comp_jpeg[hw_id]->reg_base); + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); + spin_unlock_irqrestore(&comp_jpeg[hw_id]->hw_lock, flags); + + return; + +setdst_end: + clk_disable_unprepare(comp_jpeg[hw_id]->jdec_clk.clks->clk); +clk_end: + pm_runtime_put(comp_jpeg[hw_id]->dev); +dec_end: + v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); + v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + v4l2_m2m_buf_done(src_buf, buf_state); + v4l2_m2m_buf_done(dst_buf, buf_state); +getbuf_fail: + atomic_inc(&jpeg->dechw_rdy); + mtk_jpegdec_put_hw(jpeg, hw_id); + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); +} + +static void mtk_jpeg_multicore_dec_device_run(void *priv) +{ + struct mtk_jpeg_ctx *ctx = priv; + struct mtk_jpeg_dev *jpeg = ctx->jpeg; + + queue_work(jpeg->workqueue, &ctx->jpeg_work); +} + static void mtk_jpeg_dec_device_run(void *priv) { struct mtk_jpeg_ctx *ctx = priv; @@ -984,8 +1340,10 @@ static void mtk_jpeg_dec_device_run(void *priv) spin_lock_irqsave(&jpeg->hw_lock, flags); mtk_jpeg_dec_reset(jpeg->reg_base); mtk_jpeg_dec_set_config(jpeg->reg_base, - &jpeg_src_buf->dec_param, &bs, &fb); - + &jpeg_src_buf->dec_param, + jpeg_src_buf->bs_size, + &bs, + &fb); mtk_jpeg_dec_start(jpeg->reg_base); spin_unlock_irqrestore(&jpeg->hw_lock, flags); return; @@ -1009,6 +1367,14 @@ static const struct v4l2_m2m_ops mtk_jpeg_enc_m2m_ops = { .device_run = mtk_jpeg_enc_device_run, }; +static const struct v4l2_m2m_ops mtk_jpeg_multicore_enc_m2m_ops = { + .device_run = mtk_jpeg_multicore_enc_device_run, +}; + +static const struct v4l2_m2m_ops mtk_jpeg_multicore_dec_m2m_ops = { + .device_run = mtk_jpeg_multicore_dec_device_run, +}; + static 
const struct v4l2_m2m_ops mtk_jpeg_dec_m2m_ops = { .device_run = mtk_jpeg_dec_device_run, .job_ready = mtk_jpeg_dec_job_ready, @@ -1209,6 +1575,14 @@ static int mtk_jpeg_open(struct file *file) goto free; } + if (jpeg->is_jpgenc_multihw) + INIT_WORK(&ctx->jpeg_work, mtk_jpegenc_worker); + + if (jpeg->is_jpgdec_multihw) + INIT_WORK(&ctx->jpeg_work, mtk_jpegdec_worker); + + INIT_LIST_HEAD(&ctx->dst_done_queue); + spin_lock_init(&ctx->done_queue_lock); v4l2_fh_init(&ctx->fh, vfd); file->private_data = &ctx->fh; v4l2_fh_add(&ctx->fh); @@ -1230,6 +1604,7 @@ static int mtk_jpeg_open(struct file *file) } else { v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 0); } + mtk_jpeg_set_default_params(ctx); mutex_unlock(&jpeg->lock); return 0; @@ -1310,38 +1685,51 @@ static int mtk_jpeg_probe(struct platform_device *pdev) spin_lock_init(&jpeg->hw_lock); jpeg->dev = &pdev->dev; jpeg->variant = of_device_get_match_data(jpeg->dev); - INIT_DELAYED_WORK(&jpeg->job_timeout_work, mtk_jpeg_job_timeout_work); - jpeg->reg_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(jpeg->reg_base)) { - ret = PTR_ERR(jpeg->reg_base); - return ret; + ret = devm_of_platform_populate(&pdev->dev); + if (ret) { + v4l2_err(&jpeg->v4l2_dev, "Master of platform populate failed."); + return -EINVAL; } - jpeg_irq = platform_get_irq(pdev, 0); - if (jpeg_irq < 0) - return jpeg_irq; + if (list_empty(&pdev->dev.devres_head)) { + INIT_DELAYED_WORK(&jpeg->job_timeout_work, + mtk_jpeg_job_timeout_work); - ret = devm_request_irq(&pdev->dev, jpeg_irq, - jpeg->variant->irq_handler, 0, pdev->name, jpeg); - if (ret) { - dev_err(&pdev->dev, "Failed to request jpeg_irq %d (%d)\n", - jpeg_irq, ret); - goto err_req_irq; - } + jpeg->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(jpeg->reg_base)) { + ret = PTR_ERR(jpeg->reg_base); + return ret; + } - ret = devm_clk_bulk_get(jpeg->dev, jpeg->variant->num_clks, - jpeg->variant->clks); - if (ret) { - dev_err(&pdev->dev, "Failed to init clk, err %d\n", ret); - goto err_clk_init; + jpeg_irq = platform_get_irq(pdev, 0); + if (jpeg_irq < 0) + return jpeg_irq; + + ret = devm_request_irq(&pdev->dev, + jpeg_irq, + jpeg->variant->irq_handler, + 0, + pdev->name, jpeg); + if (ret) { + dev_err(&pdev->dev, "Failed to request jpeg_irq %d (%d)\n", + jpeg_irq, ret); + return ret; + } + + ret = devm_clk_bulk_get(jpeg->dev, + jpeg->variant->num_clks, + jpeg->variant->clks); + if (ret) { + dev_err(&pdev->dev, "Failed to init clk\n"); + return ret; + } } ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev); if (ret) { dev_err(&pdev->dev, "Failed to register v4l2 device\n"); - ret = -EINVAL; - goto err_dev_register; + return -EINVAL; } jpeg->m2m_dev = v4l2_m2m_init(jpeg->variant->m2m_ops); @@ -1399,12 +1787,6 @@ err_vfd_jpeg_alloc: err_m2m_init: v4l2_device_unregister(&jpeg->v4l2_dev); -err_dev_register: - -err_clk_init: - -err_req_irq: - return ret; } @@ -1494,6 +1876,29 @@ static const struct mtk_jpeg_variant mtk_jpeg_drvdata = { .cap_q_default_fourcc = V4L2_PIX_FMT_JPEG, }; +static struct mtk_jpeg_variant mtk8195_jpegenc_drvdata = { + .formats = mtk_jpeg_enc_formats, + .num_formats = MTK_JPEG_ENC_NUM_FORMATS, + .qops = &mtk_jpeg_enc_qops, + .m2m_ops = &mtk_jpeg_multicore_enc_m2m_ops, + .dev_name = "mtk-jpeg-enc", + .ioctl_ops = &mtk_jpeg_enc_ioctl_ops, + .out_q_default_fourcc = V4L2_PIX_FMT_YUYV, + .cap_q_default_fourcc = V4L2_PIX_FMT_JPEG, +}; + +static const struct mtk_jpeg_variant mtk8195_jpegdec_drvdata = { + .formats = mtk_jpeg_dec_formats, + .num_formats = MTK_JPEG_DEC_NUM_FORMATS, + .qops = 
&mtk_jpeg_dec_qops, + .m2m_ops = &mtk_jpeg_multicore_dec_m2m_ops, + .dev_name = "mtk-jpeg-dec", + .ioctl_ops = &mtk_jpeg_dec_ioctl_ops, + .out_q_default_fourcc = V4L2_PIX_FMT_JPEG, + .cap_q_default_fourcc = V4L2_PIX_FMT_YUV420M, +}; + +#if defined(CONFIG_OF) static const struct of_device_id mtk_jpeg_match[] = { { .compatible = "mediatek,mt8173-jpgdec", @@ -1507,17 +1912,26 @@ static const struct of_device_id mtk_jpeg_match[] = { .compatible = "mediatek,mtk-jpgenc", .data = &mtk_jpeg_drvdata, }, + { + .compatible = "mediatek,mt8195-jpgenc", + .data = &mtk8195_jpegenc_drvdata, + }, + { + .compatible = "mediatek,mt8195-jpgdec", + .data = &mtk8195_jpegdec_drvdata, + }, {}, }; MODULE_DEVICE_TABLE(of, mtk_jpeg_match); +#endif static struct platform_driver mtk_jpeg_driver = { .probe = mtk_jpeg_probe, .remove = mtk_jpeg_remove, .driver = { .name = MTK_JPEG_NAME, - .of_match_table = mtk_jpeg_match, + .of_match_table = of_match_ptr(mtk_jpeg_match), .pm = &mtk_jpeg_pm_ops, }, }; diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h index 3e4811a41ba2..b9126476be8f 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h @@ -9,14 +9,16 @@ #ifndef _MTK_JPEG_CORE_H #define _MTK_JPEG_CORE_H +#include <linux/clk.h> #include <linux/interrupt.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fh.h> +#include <media/videobuf2-v4l2.h> -#define MTK_JPEG_NAME "mtk-jpeg" +#include "mtk_jpeg_dec_hw.h" -#define MTK_JPEG_COMP_MAX 3 +#define MTK_JPEG_NAME "mtk-jpeg" #define MTK_JPEG_FMT_FLAG_OUTPUT BIT(0) #define MTK_JPEG_FMT_FLAG_CAPTURE BIT(1) @@ -74,6 +76,115 @@ struct mtk_jpeg_variant { u32 cap_q_default_fourcc; }; +struct mtk_jpeg_src_buf { + u32 frame_num; + struct vb2_v4l2_buffer b; + struct list_head list; + u32 bs_size; + struct mtk_jpeg_dec_param dec_param; + + struct mtk_jpeg_ctx *curr_ctx; +}; + +enum mtk_jpeg_hw_state { + MTK_JPEG_HW_IDLE = 0, + MTK_JPEG_HW_BUSY = 1, +}; + +struct mtk_jpeg_hw_param { + struct vb2_v4l2_buffer *src_buffer; + struct vb2_v4l2_buffer *dst_buffer; + struct mtk_jpeg_ctx *curr_ctx; +}; + +enum mtk_jpegenc_hw_id { + MTK_JPEGENC_HW0, + MTK_JPEGENC_HW1, + MTK_JPEGENC_HW_MAX, +}; + +enum mtk_jpegdec_hw_id { + MTK_JPEGDEC_HW0, + MTK_JPEGDEC_HW1, + MTK_JPEGDEC_HW2, + MTK_JPEGDEC_HW_MAX, +}; + +/** + * struct mtk_jpegenc_clk - Structure used to store vcodec clock information + * @clks: JPEG encode clock + * @clk_num: JPEG encode clock numbers + */ +struct mtk_jpegenc_clk { + struct clk_bulk_data *clks; + int clk_num; +}; + +/** + * struct mtk_jpegdec_clk - Structure used to store vcodec clock information + * @clks: JPEG decode clock + * @clk_num: JPEG decode clock numbers + */ +struct mtk_jpegdec_clk { + struct clk_bulk_data *clks; + int clk_num; +}; + +/** + * struct mtk_jpegenc_comp_dev - JPEG COREX abstraction + * @dev: JPEG device + * @plat_dev: platform device data + * @reg_base: JPEG registers mapping + * @master_dev: mtk_jpeg_dev device + * @venc_clk: jpeg encode clock + * @jpegenc_irq: jpeg encode irq num + * @job_timeout_work: encode timeout workqueue + * @hw_param: jpeg encode hw parameters + * @hw_rdy: record hw ready + * @hw_state: record hw state + * @hw_lock: spinlock protecting the hw device resource + */ +struct mtk_jpegenc_comp_dev { + struct device *dev; + struct platform_device *plat_dev; + void __iomem *reg_base; + struct mtk_jpeg_dev *master_dev; + struct mtk_jpegenc_clk venc_clk; + int 
jpegenc_irq; + struct delayed_work job_timeout_work; + struct mtk_jpeg_hw_param hw_param; + enum mtk_jpeg_hw_state hw_state; + /* spinlock protecting the hw device resource */ + spinlock_t hw_lock; +}; + +/** + * struct mtk_jpegdec_comp_dev - JPEG COREX abstraction + * @dev: JPEG device + * @plat_dev: platform device data + * @reg_base: JPEG registers mapping + * @master_dev: mtk_jpeg_dev device + * @jdec_clk: mtk_jpegdec_clk + * @jpegdec_irq: jpeg decode irq num + * @job_timeout_work: decode timeout workqueue + * @hw_param: jpeg decode hw parameters + * @hw_state: record hw state + * @hw_lock: spinlock protecting hw + */ +struct mtk_jpegdec_comp_dev { + struct device *dev; + struct platform_device *plat_dev; + void __iomem *reg_base; + struct mtk_jpeg_dev *master_dev; + struct mtk_jpegdec_clk jdec_clk; + int jpegdec_irq; + struct delayed_work job_timeout_work; + struct mtk_jpeg_hw_param hw_param; + enum mtk_jpeg_hw_state hw_state; + /* spinlock protecting the hw device resource */ + spinlock_t hw_lock; +}; + /** * struct mtk_jpeg_dev - JPEG IP abstraction * @lock: the mutex protecting this structure @@ -87,6 +198,17 @@ struct mtk_jpeg_variant { * @reg_base: JPEG registers mapping * @job_timeout_work: IRQ timeout structure * @variant: driver variant to be used + * @reg_encbase: jpg encode register base addr + * @enc_hw_dev: jpg encode hardware device + * @is_jpgenc_multihw: the flag of multi-hw core + * @enc_hw_wq: jpg encode wait queue + * @enchw_rdy: jpg encode hw ready flag + * @reg_decbase: jpg decode register base addr + * @dec_hw_dev: jpg decode hardware device + * @is_jpgdec_multihw: the flag of dec multi-hw core + * @dec_hw_wq: jpg decode wait queue + * @dec_workqueue: jpg decode work queue + * @dechw_rdy: jpg decode hw ready flag */ struct mtk_jpeg_dev { struct mutex lock; @@ -100,6 +222,19 @@ struct mtk_jpeg_dev { void __iomem *reg_base; struct delayed_work job_timeout_work; const struct mtk_jpeg_variant *variant; + + void __iomem *reg_encbase[MTK_JPEGENC_HW_MAX]; + struct mtk_jpegenc_comp_dev *enc_hw_dev[MTK_JPEGENC_HW_MAX]; + bool is_jpgenc_multihw; + wait_queue_head_t enc_hw_wq; + atomic_t enchw_rdy; + + void __iomem *reg_decbase[MTK_JPEGDEC_HW_MAX]; + struct mtk_jpegdec_comp_dev *dec_hw_dev[MTK_JPEGDEC_HW_MAX]; + bool is_jpgdec_multihw; + wait_queue_head_t dec_hw_wq; + struct workqueue_struct *dec_workqueue; + atomic_t dechw_rdy; }; /** @@ -138,15 +273,20 @@ struct mtk_jpeg_q_data { /** * struct mtk_jpeg_ctx - the device context data - * @jpeg: JPEG IP device for this context - * @out_q: source (output) queue information - * @cap_q: destination (capture) queue queue information - * @fh: V4L2 file handle - * @state: state of the context - * @enable_exif: enable exif mode of jpeg encoder - * @enc_quality: jpeg encoder quality - * @restart_interval: jpeg encoder restart interval - * @ctrl_hdl: controls handler + * @jpeg: JPEG IP device for this context + * @out_q: source (output) queue information + * @cap_q: destination queue information + * @fh: V4L2 file handle + * @state: state of the context + * @enable_exif: enable exif mode of jpeg encoder + * @enc_quality: jpeg encoder quality + * @restart_interval: jpeg encoder restart interval + * @ctrl_hdl: controls handler + * @jpeg_work: jpeg encoder workqueue + * @total_frame_num: encoded frame number + * @dst_done_queue: encoded frame buffer queue + * @done_queue_lock: encoded frame operation spinlock + * @last_done_frame_num: the last encoded frame number */ struct mtk_jpeg_ctx { struct mtk_jpeg_dev *jpeg; @@ -158,6 +298,13 @@ 
struct mtk_jpeg_ctx { u8 enc_quality; u8 restart_interval; struct v4l2_ctrl_handler ctrl_hdl; + + struct work_struct jpeg_work; + u32 total_frame_num; + struct list_head dst_done_queue; + /* spinlock protecting the encode done buffer */ + spinlock_t done_queue_lock; + u32 last_done_frame_num; }; #endif /* _MTK_JPEG_CORE_H */ diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c index afbbfd5d02bc..8c07fa02fd9a 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c @@ -5,10 +5,26 @@ * Rick Chang <rick.chang@mediatek.com> */ +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <media/media-device.h> #include <media/videobuf2-core.h> - +#include <media/videobuf2-v4l2.h> +#include <media/v4l2-mem2mem.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-device.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-event.h> + +#include "mtk_jpeg_core.h" #include "mtk_jpeg_dec_hw.h" #define MTK_JPEG_DUNUM_MASK(val) (((val) - 1) & 0x3) @@ -23,6 +39,16 @@ enum mtk_jpeg_color { MTK_JPEG_COLOR_400 = 0x00110000 }; +#if defined(CONFIG_OF) +static const struct of_device_id mtk_jpegdec_hw_ids[] = { + { + .compatible = "mediatek,mt8195-jpgdec-hw", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_jpegdec_hw_ids); +#endif + static inline int mtk_jpeg_verify_align(u32 val, int align, u32 reg) { if (val & (align - 1)) { @@ -188,6 +214,7 @@ int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param) return 0; } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_fill_param); u32 mtk_jpeg_dec_get_int_status(void __iomem *base) { @@ -199,6 +226,7 @@ u32 mtk_jpeg_dec_get_int_status(void __iomem *base) return ret; } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_get_int_status); u32 mtk_jpeg_dec_enum_result(u32 irq_result) { @@ -215,11 +243,13 @@ u32 mtk_jpeg_dec_enum_result(u32 irq_result) return MTK_JPEG_DEC_RESULT_ERROR_UNKNOWN; } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_enum_result); void mtk_jpeg_dec_start(void __iomem *base) { writel(0, base + JPGDEC_REG_TRIG); } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_start); static void mtk_jpeg_dec_soft_reset(void __iomem *base) { @@ -239,6 +269,7 @@ void mtk_jpeg_dec_reset(void __iomem *base) mtk_jpeg_dec_soft_reset(base); mtk_jpeg_dec_hard_reset(base); } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_reset); static void mtk_jpeg_dec_set_brz_factor(void __iomem *base, u8 yscale_w, u8 yscale_h, u8 uvscale_w, u8 uvscale_h) @@ -299,12 +330,14 @@ static void mtk_jpeg_dec_set_bs_write_ptr(void __iomem *base, u32 ptr) writel(ptr, base + JPGDEC_REG_FILE_BRP); } -static void mtk_jpeg_dec_set_bs_info(void __iomem *base, u32 addr, u32 size) +static void mtk_jpeg_dec_set_bs_info(void __iomem *base, u32 addr, u32 size, + u32 bitstream_size) { mtk_jpeg_verify_align(addr, 16, JPGDEC_REG_FILE_ADDR); mtk_jpeg_verify_align(size, 128, JPGDEC_REG_FILE_TOTAL_SIZE); writel(addr, base + JPGDEC_REG_FILE_ADDR); writel(size, base + JPGDEC_REG_FILE_TOTAL_SIZE); + writel(bitstream_size, base + JPGDEC_REG_BIT_STREAM_SIZE); } static void mtk_jpeg_dec_set_comp_id(void __iomem *base, u32 id_y, u32 id_u, @@ -373,37 +406,275 @@ static void mtk_jpeg_dec_set_sampling_factor(void __iomem *base, u32 comp_num, } void mtk_jpeg_dec_set_config(void __iomem *base, - struct mtk_jpeg_dec_param *config, + struct 
mtk_jpeg_dec_param *cfg, + u32 bitstream_size, struct mtk_jpeg_bs *bs, struct mtk_jpeg_fb *fb) { - mtk_jpeg_dec_set_brz_factor(base, 0, 0, config->uv_brz_w, 0); + mtk_jpeg_dec_set_brz_factor(base, 0, 0, cfg->uv_brz_w, 0); mtk_jpeg_dec_set_dec_mode(base, 0); - mtk_jpeg_dec_set_comp0_du(base, config->unit_num); - mtk_jpeg_dec_set_total_mcu(base, config->total_mcu); - mtk_jpeg_dec_set_bs_info(base, bs->str_addr, bs->size); + mtk_jpeg_dec_set_comp0_du(base, cfg->unit_num); + mtk_jpeg_dec_set_total_mcu(base, cfg->total_mcu); + mtk_jpeg_dec_set_bs_info(base, bs->str_addr, bs->size, bitstream_size); mtk_jpeg_dec_set_bs_write_ptr(base, bs->end_addr); - mtk_jpeg_dec_set_du_membership(base, config->membership, 1, - (config->comp_num == 1) ? 1 : 0); - mtk_jpeg_dec_set_comp_id(base, config->comp_id[0], config->comp_id[1], - config->comp_id[2]); - mtk_jpeg_dec_set_q_table(base, config->qtbl_num[0], - config->qtbl_num[1], config->qtbl_num[2]); - mtk_jpeg_dec_set_sampling_factor(base, config->comp_num, - config->sampling_w[0], - config->sampling_h[0], - config->sampling_w[1], - config->sampling_h[1], - config->sampling_w[2], - config->sampling_h[2]); - mtk_jpeg_dec_set_mem_stride(base, config->mem_stride[0], - config->mem_stride[1]); - mtk_jpeg_dec_set_img_stride(base, config->img_stride[0], - config->img_stride[1]); + mtk_jpeg_dec_set_du_membership(base, cfg->membership, 1, + (cfg->comp_num == 1) ? 1 : 0); + mtk_jpeg_dec_set_comp_id(base, cfg->comp_id[0], cfg->comp_id[1], + cfg->comp_id[2]); + mtk_jpeg_dec_set_q_table(base, cfg->qtbl_num[0], + cfg->qtbl_num[1], cfg->qtbl_num[2]); + mtk_jpeg_dec_set_sampling_factor(base, cfg->comp_num, + cfg->sampling_w[0], + cfg->sampling_h[0], + cfg->sampling_w[1], + cfg->sampling_h[1], + cfg->sampling_w[2], + cfg->sampling_h[2]); + mtk_jpeg_dec_set_mem_stride(base, cfg->mem_stride[0], + cfg->mem_stride[1]); + mtk_jpeg_dec_set_img_stride(base, cfg->img_stride[0], + cfg->img_stride[1]); mtk_jpeg_dec_set_dst_bank0(base, fb->plane_addr[0], fb->plane_addr[1], fb->plane_addr[2]); mtk_jpeg_dec_set_dst_bank1(base, 0, 0, 0); - mtk_jpeg_dec_set_dma_group(base, config->dma_mcu, config->dma_group, - config->dma_last_mcu); - mtk_jpeg_dec_set_pause_mcu_idx(base, config->total_mcu); + mtk_jpeg_dec_set_dma_group(base, cfg->dma_mcu, cfg->dma_group, + cfg->dma_last_mcu); + mtk_jpeg_dec_set_pause_mcu_idx(base, cfg->total_mcu); +} +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_set_config); + +static void mtk_jpegdec_put_buf(struct mtk_jpegdec_comp_dev *jpeg) +{ + struct mtk_jpeg_src_buf *dst_done_buf, *tmp_dst_done_buf; + struct vb2_v4l2_buffer *dst_buffer; + struct list_head *temp_entry; + struct list_head *pos = NULL; + struct mtk_jpeg_ctx *ctx; + unsigned long flags; + + ctx = jpeg->hw_param.curr_ctx; + if (unlikely(!ctx)) { + dev_err(jpeg->dev, "comp_jpeg ctx fail !!!\n"); + return; + } + + dst_buffer = jpeg->hw_param.dst_buffer; + if (!dst_buffer) { + dev_err(jpeg->dev, "comp_jpeg dst_buffer fail !!!\n"); + return; + } + + dst_done_buf = container_of(dst_buffer, struct mtk_jpeg_src_buf, b); + + spin_lock_irqsave(&ctx->done_queue_lock, flags); + list_add_tail(&dst_done_buf->list, &ctx->dst_done_queue); + while (!list_empty(&ctx->dst_done_queue) && + (pos != &ctx->dst_done_queue)) { + list_for_each_prev_safe(pos, temp_entry, &ctx->dst_done_queue) { + tmp_dst_done_buf = list_entry(pos, + struct mtk_jpeg_src_buf, + list); + if (tmp_dst_done_buf->frame_num == + ctx->last_done_frame_num) { + list_del(&tmp_dst_done_buf->list); + v4l2_m2m_buf_done(&tmp_dst_done_buf->b, + VB2_BUF_STATE_DONE); + 
ctx->last_done_frame_num++; + } + } + } + spin_unlock_irqrestore(&ctx->done_queue_lock, flags); +} + +static void mtk_jpegdec_timeout_work(struct work_struct *work) +{ + enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; + struct mtk_jpegdec_comp_dev *cjpeg = + container_of(work, struct mtk_jpegdec_comp_dev, + job_timeout_work.work); + struct mtk_jpeg_dev *master_jpeg = cjpeg->master_dev; + struct vb2_v4l2_buffer *src_buf, *dst_buf; + + src_buf = cjpeg->hw_param.src_buffer; + dst_buf = cjpeg->hw_param.dst_buffer; + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true); + + mtk_jpeg_dec_reset(cjpeg->reg_base); + clk_disable_unprepare(cjpeg->jdec_clk.clks->clk); + pm_runtime_put(cjpeg->dev); + cjpeg->hw_state = MTK_JPEG_HW_IDLE; + atomic_inc(&master_jpeg->dechw_rdy); + wake_up(&master_jpeg->dec_hw_wq); + v4l2_m2m_buf_done(src_buf, buf_state); + mtk_jpegdec_put_buf(cjpeg); +} + +static irqreturn_t mtk_jpegdec_hw_irq_handler(int irq, void *priv) +{ + struct vb2_v4l2_buffer *src_buf, *dst_buf; + struct mtk_jpeg_src_buf *jpeg_src_buf; + enum vb2_buffer_state buf_state; + struct mtk_jpeg_ctx *ctx; + u32 dec_irq_ret; + u32 irq_status; + int i; + + struct mtk_jpegdec_comp_dev *jpeg = priv; + struct mtk_jpeg_dev *master_jpeg = jpeg->master_dev; + + cancel_delayed_work(&jpeg->job_timeout_work); + + ctx = jpeg->hw_param.curr_ctx; + src_buf = jpeg->hw_param.src_buffer; + dst_buf = jpeg->hw_param.dst_buffer; + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true); + + irq_status = mtk_jpeg_dec_get_int_status(jpeg->reg_base); + dec_irq_ret = mtk_jpeg_dec_enum_result(irq_status); + if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW) + mtk_jpeg_dec_reset(jpeg->reg_base); + + if (dec_irq_ret != MTK_JPEG_DEC_RESULT_EOF_DONE) + dev_warn(jpeg->dev, "Jpg Dec occurs unknown Err."); + + jpeg_src_buf = + container_of(src_buf, struct mtk_jpeg_src_buf, b); + + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++) + vb2_set_plane_payload(&dst_buf->vb2_buf, i, + jpeg_src_buf->dec_param.comp_size[i]); + + buf_state = VB2_BUF_STATE_DONE; + v4l2_m2m_buf_done(src_buf, buf_state); + mtk_jpegdec_put_buf(jpeg); + pm_runtime_put(ctx->jpeg->dev); + clk_disable_unprepare(jpeg->jdec_clk.clks->clk); + + jpeg->hw_state = MTK_JPEG_HW_IDLE; + wake_up(&master_jpeg->dec_hw_wq); + atomic_inc(&master_jpeg->dechw_rdy); + + return IRQ_HANDLED; +} + +static int mtk_jpegdec_hw_init_irq(struct mtk_jpegdec_comp_dev *dev) +{ + struct platform_device *pdev = dev->plat_dev; + int ret; + + dev->jpegdec_irq = platform_get_irq(pdev, 0); + if (dev->jpegdec_irq < 0) + return dev->jpegdec_irq; + + ret = devm_request_irq(&pdev->dev, + dev->jpegdec_irq, + mtk_jpegdec_hw_irq_handler, + 0, + pdev->name, dev); + if (ret) { + dev_err(&pdev->dev, "Failed to devm_request_irq %d (%d)", + dev->jpegdec_irq, ret); + return ret; + } + + return 0; } + +static void mtk_jpegdec_destroy_workqueue(void *data) +{ + destroy_workqueue(data); +} + +static int mtk_jpegdec_hw_probe(struct platform_device *pdev) +{ + struct mtk_jpegdec_clk *jpegdec_clk; + struct mtk_jpeg_dev *master_dev; + struct mtk_jpegdec_comp_dev *dev; + int ret, i; + + struct device *decs = &pdev->dev; + + if (!decs->parent) + return -EPROBE_DEFER; + + master_dev = dev_get_drvdata(decs->parent); + if (!master_dev) + return -EPROBE_DEFER; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->plat_dev = pdev; + dev->dev = &pdev->dev; + + if (!master_dev->is_jpgdec_multihw) { + master_dev->is_jpgdec_multihw = true; + for (i = 0; i < MTK_JPEGDEC_HW_MAX; i++) + 
master_dev->dec_hw_dev[i] = NULL; + + init_waitqueue_head(&master_dev->dec_hw_wq); + master_dev->workqueue = alloc_ordered_workqueue(MTK_JPEG_NAME, + WQ_MEM_RECLAIM + | WQ_FREEZABLE); + if (!master_dev->workqueue) + return -EINVAL; + + ret = devm_add_action_or_reset(&pdev->dev, mtk_jpegdec_destroy_workqueue, + master_dev->workqueue); + if (ret) + return ret; + } + + atomic_set(&master_dev->dechw_rdy, MTK_JPEGDEC_HW_MAX); + spin_lock_init(&dev->hw_lock); + dev->hw_state = MTK_JPEG_HW_IDLE; + + INIT_DELAYED_WORK(&dev->job_timeout_work, + mtk_jpegdec_timeout_work); + + jpegdec_clk = &dev->jdec_clk; + + jpegdec_clk->clk_num = devm_clk_bulk_get_all(&pdev->dev, + &jpegdec_clk->clks); + if (jpegdec_clk->clk_num < 0) + return dev_err_probe(&pdev->dev, + jpegdec_clk->clk_num, + "Failed to get jpegdec clock count.\n"); + + dev->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dev->reg_base)) + return PTR_ERR(dev->reg_base); + + ret = mtk_jpegdec_hw_init_irq(dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to register JPEGDEC irq handler.\n"); + + for (i = 0; i < MTK_JPEGDEC_HW_MAX; i++) { + if (master_dev->dec_hw_dev[i]) + continue; + + master_dev->dec_hw_dev[i] = dev; + master_dev->reg_decbase[i] = dev->reg_base; + dev->master_dev = master_dev; + } + + platform_set_drvdata(pdev, dev); + pm_runtime_enable(&pdev->dev); + + return 0; +} + +static struct platform_driver mtk_jpegdec_hw_driver = { + .probe = mtk_jpegdec_hw_probe, + .driver = { + .name = "mtk-jpegdec-hw", + .of_match_table = of_match_ptr(mtk_jpegdec_hw_ids), + }, +}; + +module_platform_driver(mtk_jpegdec_hw_driver); + +MODULE_DESCRIPTION("MediaTek JPEG decode HW driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h index fa0d45fd7c34..8c31c6b12417 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h @@ -11,9 +11,10 @@ #include <media/videobuf2-core.h> -#include "mtk_jpeg_core.h" #include "mtk_jpeg_dec_reg.h" +#define MTK_JPEG_COMP_MAX 3 + enum { MTK_JPEG_DEC_RESULT_EOF_DONE = 0, MTK_JPEG_DEC_RESULT_PAUSE = 1, @@ -70,7 +71,8 @@ int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param); u32 mtk_jpeg_dec_get_int_status(void __iomem *dec_reg_base); u32 mtk_jpeg_dec_enum_result(u32 irq_result); void mtk_jpeg_dec_set_config(void __iomem *base, - struct mtk_jpeg_dec_param *config, + struct mtk_jpeg_dec_param *cfg, + u32 bitstream_size, struct mtk_jpeg_bs *bs, struct mtk_jpeg_fb *fb); void mtk_jpeg_dec_reset(void __iomem *dec_reg_base); diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h index 21ec8f96797f..27b7711ca341 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h @@ -45,5 +45,6 @@ #define JPGDEC_REG_QT_ID 0x0270 #define JPGDEC_REG_INTERRUPT_STATUS 0x0274 #define JPGDEC_REG_STATUS 0x0278 +#define JPGDEC_REG_BIT_STREAM_SIZE 0x0344 #endif /* _MTK_JPEG_REG_H */ diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c index 1cf037bf72dd..1bbb712d78d0 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c @@ -5,11 +5,27 @@ * */ +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/io.h> #include 
<linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <media/media-device.h> #include <media/videobuf2-core.h> #include <media/videobuf2-dma-contig.h> +#include <media/videobuf2-v4l2.h> +#include <media/v4l2-mem2mem.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-device.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-event.h> +#include "mtk_jpeg_core.h" #include "mtk_jpeg_enc_hw.h" static const struct mtk_jpeg_enc_qlt mtk_jpeg_enc_quality[] = { @@ -30,18 +46,30 @@ static const struct mtk_jpeg_enc_qlt mtk_jpeg_enc_quality[] = { {.quality_param = 97, .hardware_value = JPEG_ENC_QUALITY_Q97}, }; +#if defined(CONFIG_OF) +static const struct of_device_id mtk_jpegenc_drv_ids[] = { + { + .compatible = "mediatek,mt8195-jpgenc-hw", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_jpegenc_drv_ids); +#endif + void mtk_jpeg_enc_reset(void __iomem *base) { writel(0, base + JPEG_ENC_RSTB); writel(JPEG_ENC_RESET_BIT, base + JPEG_ENC_RSTB); writel(0, base + JPEG_ENC_CODEC_SEL); } +EXPORT_SYMBOL_GPL(mtk_jpeg_enc_reset); u32 mtk_jpeg_enc_get_file_size(void __iomem *base) { return readl(base + JPEG_ENC_DMA_ADDR0) - readl(base + JPEG_ENC_DST_ADDR0); } +EXPORT_SYMBOL_GPL(mtk_jpeg_enc_get_file_size); void mtk_jpeg_enc_start(void __iomem *base) { @@ -51,6 +79,7 @@ void mtk_jpeg_enc_start(void __iomem *base) value |= JPEG_ENC_CTRL_INT_EN_BIT | JPEG_ENC_CTRL_ENABLE_BIT; writel(value, base + JPEG_ENC_CTRL); } +EXPORT_SYMBOL_GPL(mtk_jpeg_enc_start); void mtk_jpeg_set_enc_src(struct mtk_jpeg_ctx *ctx, void __iomem *base, struct vb2_buffer *src_buf) @@ -67,6 +96,7 @@ void mtk_jpeg_set_enc_src(struct mtk_jpeg_ctx *ctx, void __iomem *base, writel(dma_addr, base + JPEG_ENC_SRC_CHROMA_ADDR); } } +EXPORT_SYMBOL_GPL(mtk_jpeg_set_enc_src); void mtk_jpeg_set_enc_dst(struct mtk_jpeg_ctx *ctx, void __iomem *base, struct vb2_buffer *dst_buf) @@ -86,6 +116,7 @@ void mtk_jpeg_set_enc_dst(struct mtk_jpeg_ctx *ctx, void __iomem *base, writel(dma_addr & ~0xf, base + JPEG_ENC_DST_ADDR0); writel((dma_addr + size) & ~0xf, base + JPEG_ENC_STALL_ADDR0); } +EXPORT_SYMBOL_GPL(mtk_jpeg_set_enc_dst); void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base) { @@ -152,3 +183,227 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base) writel(ctx->restart_interval, base + JPEG_ENC_RST_MCU_NUM); } +EXPORT_SYMBOL_GPL(mtk_jpeg_set_enc_params); + +static void mtk_jpegenc_put_buf(struct mtk_jpegenc_comp_dev *jpeg) +{ + struct mtk_jpeg_ctx *ctx; + struct vb2_v4l2_buffer *dst_buffer; + struct list_head *temp_entry; + struct list_head *pos = NULL; + struct mtk_jpeg_src_buf *dst_done_buf, *tmp_dst_done_buf; + unsigned long flags; + + ctx = jpeg->hw_param.curr_ctx; + if (!ctx) { + dev_err(jpeg->dev, "comp_jpeg ctx fail !!!\n"); + return; + } + + dst_buffer = jpeg->hw_param.dst_buffer; + if (!dst_buffer) { + dev_err(jpeg->dev, "comp_jpeg dst_buffer fail !!!\n"); + return; + } + + dst_done_buf = container_of(dst_buffer, + struct mtk_jpeg_src_buf, b); + + spin_lock_irqsave(&ctx->done_queue_lock, flags); + list_add_tail(&dst_done_buf->list, &ctx->dst_done_queue); + while (!list_empty(&ctx->dst_done_queue) && + (pos != &ctx->dst_done_queue)) { + list_for_each_prev_safe(pos, temp_entry, &ctx->dst_done_queue) { + tmp_dst_done_buf = list_entry(pos, + struct mtk_jpeg_src_buf, + list); + if (tmp_dst_done_buf->frame_num == + ctx->last_done_frame_num) { + list_del(&tmp_dst_done_buf->list); + 
v4l2_m2m_buf_done(&tmp_dst_done_buf->b, + VB2_BUF_STATE_DONE); + ctx->last_done_frame_num++; + } + } + } + spin_unlock_irqrestore(&ctx->done_queue_lock, flags); +} + +static void mtk_jpegenc_timeout_work(struct work_struct *work) +{ + struct delayed_work *dly_work = to_delayed_work(work); + struct mtk_jpegenc_comp_dev *cjpeg = + container_of(dly_work, + struct mtk_jpegenc_comp_dev, + job_timeout_work); + struct mtk_jpeg_dev *master_jpeg = cjpeg->master_dev; + enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; + struct vb2_v4l2_buffer *src_buf, *dst_buf; + + src_buf = cjpeg->hw_param.src_buffer; + dst_buf = cjpeg->hw_param.dst_buffer; + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true); + + mtk_jpeg_enc_reset(cjpeg->reg_base); + clk_disable_unprepare(cjpeg->venc_clk.clks->clk); + pm_runtime_put(cjpeg->dev); + cjpeg->hw_state = MTK_JPEG_HW_IDLE; + atomic_inc(&master_jpeg->enchw_rdy); + wake_up(&master_jpeg->enc_hw_wq); + v4l2_m2m_buf_done(src_buf, buf_state); + mtk_jpegenc_put_buf(cjpeg); +} + +static irqreturn_t mtk_jpegenc_hw_irq_handler(int irq, void *priv) +{ + struct vb2_v4l2_buffer *src_buf, *dst_buf; + enum vb2_buffer_state buf_state; + struct mtk_jpeg_ctx *ctx; + u32 result_size; + u32 irq_status; + + struct mtk_jpegenc_comp_dev *jpeg = priv; + struct mtk_jpeg_dev *master_jpeg = jpeg->master_dev; + + cancel_delayed_work(&jpeg->job_timeout_work); + + ctx = jpeg->hw_param.curr_ctx; + src_buf = jpeg->hw_param.src_buffer; + dst_buf = jpeg->hw_param.dst_buffer; + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true); + + irq_status = readl(jpeg->reg_base + JPEG_ENC_INT_STS) & + JPEG_ENC_INT_STATUS_MASK_ALLIRQ; + if (irq_status) + writel(0, jpeg->reg_base + JPEG_ENC_INT_STS); + if (!(irq_status & JPEG_ENC_INT_STATUS_DONE)) + dev_warn(jpeg->dev, "Jpg Enc occurs unknown Err."); + + result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base); + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, result_size); + buf_state = VB2_BUF_STATE_DONE; + v4l2_m2m_buf_done(src_buf, buf_state); + mtk_jpegenc_put_buf(jpeg); + pm_runtime_put(ctx->jpeg->dev); + clk_disable_unprepare(jpeg->venc_clk.clks->clk); + if (!list_empty(&ctx->fh.m2m_ctx->out_q_ctx.rdy_queue) || + !list_empty(&ctx->fh.m2m_ctx->cap_q_ctx.rdy_queue)) { + queue_work(master_jpeg->workqueue, &ctx->jpeg_work); + } + + jpeg->hw_state = MTK_JPEG_HW_IDLE; + wake_up(&master_jpeg->enc_hw_wq); + atomic_inc(&master_jpeg->enchw_rdy); + + return IRQ_HANDLED; +} + +static int mtk_jpegenc_hw_init_irq(struct mtk_jpegenc_comp_dev *dev) +{ + struct platform_device *pdev = dev->plat_dev; + int ret; + + dev->jpegenc_irq = platform_get_irq(pdev, 0); + if (dev->jpegenc_irq < 0) + return dev->jpegenc_irq; + + ret = devm_request_irq(&pdev->dev, + dev->jpegenc_irq, + mtk_jpegenc_hw_irq_handler, + 0, + pdev->name, dev); + if (ret) { + dev_err(&pdev->dev, "Failed to devm_request_irq %d (%d)", + dev->jpegenc_irq, ret); + return ret; + } + + return 0; +} + +static int mtk_jpegenc_hw_probe(struct platform_device *pdev) +{ + struct mtk_jpegenc_clk *jpegenc_clk; + struct mtk_jpeg_dev *master_dev; + struct mtk_jpegenc_comp_dev *dev; + int ret, i; + + struct device *decs = &pdev->dev; + + if (!decs->parent) + return -EPROBE_DEFER; + + master_dev = dev_get_drvdata(decs->parent); + if (!master_dev) + return -EPROBE_DEFER; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->plat_dev = pdev; + dev->dev = &pdev->dev; + + if (!master_dev->is_jpgenc_multihw) { + master_dev->is_jpgenc_multihw = true; + for (i = 0; i < MTK_JPEGENC_HW_MAX; 
i++) + master_dev->enc_hw_dev[i] = NULL; + + init_waitqueue_head(&master_dev->enc_hw_wq); + master_dev->workqueue = alloc_ordered_workqueue(MTK_JPEG_NAME, + WQ_MEM_RECLAIM + | WQ_FREEZABLE); + if (!master_dev->workqueue) + return -EINVAL; + } + + atomic_set(&master_dev->enchw_rdy, MTK_JPEGENC_HW_MAX); + spin_lock_init(&dev->hw_lock); + dev->hw_state = MTK_JPEG_HW_IDLE; + + INIT_DELAYED_WORK(&dev->job_timeout_work, + mtk_jpegenc_timeout_work); + + jpegenc_clk = &dev->venc_clk; + + jpegenc_clk->clk_num = devm_clk_bulk_get_all(&pdev->dev, + &jpegenc_clk->clks); + if (jpegenc_clk->clk_num < 0) + return dev_err_probe(&pdev->dev, jpegenc_clk->clk_num, + "Failed to get jpegenc clock count\n"); + + dev->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dev->reg_base)) + return PTR_ERR(dev->reg_base); + + ret = mtk_jpegenc_hw_init_irq(dev); + if (ret) + return ret; + + for (i = 0; i < MTK_JPEGENC_HW_MAX; i++) { + if (master_dev->enc_hw_dev[i]) + continue; + + master_dev->enc_hw_dev[i] = dev; + master_dev->reg_encbase[i] = dev->reg_base; + dev->master_dev = master_dev; + } + + platform_set_drvdata(pdev, dev); + pm_runtime_enable(&pdev->dev); + + return 0; +} + +static struct platform_driver mtk_jpegenc_hw_driver = { + .probe = mtk_jpegenc_hw_probe, + .driver = { + .name = "mtk-jpegenc-hw", + .of_match_table = of_match_ptr(mtk_jpegenc_drv_ids), + }, +}; + +module_platform_driver(mtk_jpegenc_hw_driver); + +MODULE_DESCRIPTION("MediaTek JPEG encode HW driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c index 1e3833f1c9ae..ad5fab2d8bfa 100644 --- a/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c +++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c @@ -52,9 +52,8 @@ int mtk_mdp_comp_init(struct device *dev, struct device_node *node, for (i = 0; i < ARRAY_SIZE(comp->clk); i++) { comp->clk[i] = of_clk_get(node, i); if (IS_ERR(comp->clk[i])) { - if (PTR_ERR(comp->clk[i]) != -EPROBE_DEFER) - dev_err(dev, "Failed to get clock\n"); - ret = PTR_ERR(comp->clk[i]); + ret = dev_err_probe(dev, PTR_ERR(comp->clk[i]), + "Failed to get clock\n"); goto put_dev; } diff --git a/drivers/media/platform/mediatek/mdp3/Kconfig b/drivers/media/platform/mediatek/mdp3/Kconfig index 50ae07b75b5f..846e759a8f6a 100644 --- a/drivers/media/platform/mediatek/mdp3/Kconfig +++ b/drivers/media/platform/mediatek/mdp3/Kconfig @@ -9,7 +9,6 @@ config VIDEO_MEDIATEK_MDP3 select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV select MTK_MMSYS - select VIDEO_MEDIATEK_VPU select MTK_CMDQ select MTK_SCP default n diff --git a/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h b/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h index 3e66ebaee2da..c7f231f8ea3e 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h +++ b/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h @@ -51,14 +51,14 @@ struct img_sw_addr { struct img_plane_format { u32 size; - u16 stride; + u32 stride; } __packed; struct img_pix_format { - u16 width; - u16 height; + u32 width; + u32 height; u32 colorformat; /* enum mdp_color */ - u16 ycbcr_prof; /* enum mdp_ycbcr_profile */ + u32 ycbcr_prof; /* enum mdp_ycbcr_profile */ struct img_plane_format plane_fmt[IMG_MAX_PLANES]; } __packed; @@ -72,10 +72,10 @@ struct img_image_buffer { #define IMG_SUBPIXEL_SHIFT 20 struct img_crop { - s16 left; - s16 top; - u16 width; - u16 height; + s32 left; + s32 top; + u32 width; + u32 height; u32 left_subpix; u32 top_subpix; u32 width_subpix; @@ -90,24 +90,24 @@ struct 
img_crop { struct img_input { struct img_image_buffer buffer; - u16 flags; /* HDR, DRE, dither */ + u32 flags; /* HDR, DRE, dither */ } __packed; struct img_output { struct img_image_buffer buffer; struct img_crop crop; - s16 rotation; - u16 flags; /* H-flip, sharpness, dither */ + s32 rotation; + u32 flags; /* H-flip, sharpness, dither */ } __packed; struct img_ipi_frameparam { u32 index; u32 frame_no; struct img_timeval timestamp; - u8 type; /* enum mdp_stream_type */ - u8 state; - u8 num_inputs; - u8 num_outputs; + u32 type; /* enum mdp_stream_type */ + u32 state; + u32 num_inputs; + u32 num_outputs; u64 drv_data; struct img_input inputs[IMG_MAX_HW_INPUTS]; struct img_output outputs[IMG_MAX_HW_OUTPUTS]; @@ -123,51 +123,51 @@ struct img_sw_buffer { } __packed; struct img_ipi_param { - u8 usage; + u32 usage; struct img_sw_buffer frm_param; } __packed; struct img_frameparam { struct list_head list_entry; struct img_ipi_frameparam frameparam; -}; +} __packed; /* ISP-MDP generic output information */ struct img_comp_frame { - u32 output_disable:1; - u32 bypass:1; - u16 in_width; - u16 in_height; - u16 out_width; - u16 out_height; + u32 output_disable; + u32 bypass; + u32 in_width; + u32 in_height; + u32 out_width; + u32 out_height; struct img_crop crop; - u16 in_total_width; - u16 out_total_width; + u32 in_total_width; + u32 out_total_width; } __packed; struct img_region { - s16 left; - s16 right; - s16 top; - s16 bottom; + s32 left; + s32 right; + s32 top; + s32 bottom; } __packed; struct img_offset { - s16 left; - s16 top; + s32 left; + s32 top; u32 left_subpix; u32 top_subpix; } __packed; struct img_comp_subfrm { - u32 tile_disable:1; + u32 tile_disable; struct img_region in; struct img_region out; struct img_offset luma; struct img_offset chroma; - s16 out_vertical; /* Output vertical index */ - s16 out_horizontal; /* Output horizontal index */ + s32 out_vertical; /* Output vertical index */ + s32 out_horizontal; /* Output horizontal index */ } __packed; #define IMG_MAX_SUBFRAMES 14 @@ -250,8 +250,8 @@ struct isp_data { } __packed; struct img_compparam { - u16 type; /* enum mdp_comp_type */ - u16 id; /* enum mtk_mdp_comp_id */ + u32 type; /* enum mdp_comp_id */ + u32 id; /* engine alias_id */ u32 input; u32 outputs[IMG_MAX_HW_OUTPUTS]; u32 num_outputs; @@ -273,12 +273,12 @@ struct img_mux { u32 reg; u32 value; u32 subsys_id; -}; +} __packed; struct img_mmsys_ctrl { struct img_mux sets[IMG_MAX_COMPONENTS * 2]; u32 num_sets; -}; +} __packed; struct img_config { struct img_compparam components[IMG_MAX_COMPONENTS]; diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c index 86c054600a08..124c1b96e96b 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c @@ -252,10 +252,9 @@ static int mdp_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, dma_addr_t dma_addr; pkt->va_base = kzalloc(size, GFP_KERNEL); - if (!pkt->va_base) { - kfree(pkt); + if (!pkt->va_base) return -ENOMEM; - } + pkt->buf_size = size; pkt->cl = (void *)client; @@ -368,25 +367,30 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param) cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { ret = -ENOMEM; - goto err_cmdq_data; + goto err_cancel_job; } - if (mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K)) { - ret = -ENOMEM; - goto err_cmdq_data; - } + ret = mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K); + if (ret) + goto err_free_cmd; comps = 
kcalloc(param->config->num_components, sizeof(*comps), GFP_KERNEL); if (!comps) { ret = -ENOMEM; - goto err_cmdq_data; + goto err_destroy_pkt; } path = kzalloc(sizeof(*path), GFP_KERNEL); if (!path) { ret = -ENOMEM; - goto err_cmdq_data; + goto err_free_comps; + } + + ret = mtk_mutex_prepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]); + if (ret) { + dev_err(dev, "Fail to enable mutex clk\n"); + goto err_free_path; } path->mdp_dev = mdp; @@ -406,15 +410,13 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param) ret = mdp_path_ctx_init(mdp, path); if (ret) { dev_err(dev, "mdp_path_ctx_init error\n"); - goto err_cmdq_data; + goto err_free_path; } - mtk_mutex_prepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]); - ret = mdp_path_config(mdp, cmd, path); if (ret) { dev_err(dev, "mdp_path_config error\n"); - goto err_cmdq_data; + goto err_free_path; } cmdq_pkt_finalize(&cmd->pkt); @@ -431,10 +433,8 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param) cmd->mdp_ctx = param->mdp_ctx; ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd->comps, cmd->num_comps); - if (ret) { - dev_err(dev, "comp %d failed to enable clock!\n", ret); - goto err_clock_off; - } + if (ret) + goto err_free_path; dma_sync_single_for_device(mdp->cmdq_clt->chan->mbox->dev, cmd->pkt.pa_base, cmd->pkt.cmd_buf_size, @@ -450,17 +450,20 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param) return 0; err_clock_off: - mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]); mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps, cmd->num_comps); -err_cmdq_data: +err_free_path: + mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]); kfree(path); - atomic_dec(&mdp->job_count); - wake_up(&mdp->callback_wq); - if (cmd && cmd->pkt.buf_size > 0) - mdp_cmdq_pkt_destroy(&cmd->pkt); +err_free_comps: kfree(comps); +err_destroy_pkt: + mdp_cmdq_pkt_destroy(&cmd->pkt); +err_free_cmd: kfree(cmd); +err_cancel_job: + atomic_dec(&mdp->job_count); + return ret; } EXPORT_SYMBOL_GPL(mdp_cmdq_send); diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c index d3eaf8884412..7bc05f42a23c 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c @@ -699,12 +699,22 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp) dev_err(dev, "Failed to enable clk %d. 
type:%d id:%d\n", i, comp->type, comp->id); - pm_runtime_put(comp->comp_dev); - return ret; + goto err_revert; } } return 0; + +err_revert: + while (--i >= 0) { + if (IS_ERR_OR_NULL(comp->clks[i])) + continue; + clk_disable_unprepare(comp->clks[i]); + } + if (comp->comp_dev) + pm_runtime_put_sync(comp->comp_dev); + + return ret; } void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp) @@ -723,11 +733,13 @@ void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp) int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num) { - int i; + int i, ret; - for (i = 0; i < num; i++) - if (mdp_comp_clock_on(dev, &comps[i]) != 0) - return ++i; + for (i = 0; i < num; i++) { + ret = mdp_comp_clock_on(dev, &comps[i]); + if (ret) + return ret; + } return 0; } diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c index c413e59d4286..2d1f6ae9f080 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c @@ -196,27 +196,27 @@ static int mdp_probe(struct platform_device *pdev) mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MMSYS); if (!mm_pdev) { ret = -ENODEV; - goto err_return; + goto err_destroy_device; } mdp->mdp_mmsys = &mm_pdev->dev; mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MUTEX); if (WARN_ON(!mm_pdev)) { ret = -ENODEV; - goto err_return; + goto err_destroy_device; } for (i = 0; i < MDP_PIPE_MAX; i++) { mdp->mdp_mutex[i] = mtk_mutex_get(&mm_pdev->dev); if (!mdp->mdp_mutex[i]) { ret = -ENODEV; - goto err_return; + goto err_free_mutex; } } ret = mdp_comp_config(mdp); if (ret) { dev_err(dev, "Failed to config mdp components\n"); - goto err_return; + goto err_free_mutex; } mdp->job_wq = alloc_workqueue(MDP_MODULE_NAME, WQ_FREEZABLE, 0); @@ -287,11 +287,12 @@ err_destroy_job_wq: destroy_workqueue(mdp->job_wq); err_deinit_comp: mdp_comp_destroy(mdp); -err_return: +err_free_mutex: for (i = 0; i < MDP_PIPE_MAX; i++) - if (mdp) - mtk_mutex_put(mdp->mdp_mutex[i]); + mtk_mutex_put(mdp->mdp_mutex[i]); +err_destroy_device: kfree(mdp); +err_return: dev_dbg(dev, "Errno %d\n", ret); return ret; } diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c index c45bd2599bb2..ffbcee04dc26 100644 --- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c +++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c @@ -138,10 +138,13 @@ static void mtk_vdec_stateless_cap_to_disp(struct mtk_vcodec_ctx *ctx, int error state = VB2_BUF_STATE_DONE; vb2_dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); - v4l2_m2m_buf_done(vb2_dst, state); - - mtk_v4l2_debug(2, "free frame buffer id:%d to done list", - vb2_dst->vb2_buf.index); + if (vb2_dst) { + v4l2_m2m_buf_done(vb2_dst, state); + mtk_v4l2_debug(2, "free frame buffer id:%d to done list", + vb2_dst->vb2_buf.index); + } else { + mtk_v4l2_err("dst buffer is NULL"); + } if (src_buf_req) v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl); @@ -250,7 +253,7 @@ static void mtk_vdec_worker(struct work_struct *work) state = ret ? 
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
index c45bd2599bb2..ffbcee04dc26 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
@@ -138,10 +138,13 @@ static void mtk_vdec_stateless_cap_to_disp(struct mtk_vcodec_ctx *ctx, int error
 		state = VB2_BUF_STATE_DONE;
 
 	vb2_dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
-	v4l2_m2m_buf_done(vb2_dst, state);
-
-	mtk_v4l2_debug(2, "free frame buffer id:%d to done list",
-		       vb2_dst->vb2_buf.index);
+	if (vb2_dst) {
+		v4l2_m2m_buf_done(vb2_dst, state);
+		mtk_v4l2_debug(2, "free frame buffer id:%d to done list",
+			       vb2_dst->vb2_buf.index);
+	} else {
+		mtk_v4l2_err("dst buffer is NULL");
+	}
 
 	if (src_buf_req)
 		v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
@@ -250,7 +253,7 @@ static void mtk_vdec_worker(struct work_struct *work)
 
 	state = ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE;
 	if (!IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch) ||
-	    ctx->current_codec == V4L2_PIX_FMT_VP8_FRAME || ret) {
+	    ctx->current_codec == V4L2_PIX_FMT_VP8_FRAME) {
 		v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx, state);
 		if (src_buf_req)
 			v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
index d810a78dde51..d65800a3b89d 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
@@ -1397,7 +1397,10 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
 			0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
 	v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
 			V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
-			0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+			~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+			  (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+			  (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
+			V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
 	v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
 			h264_max_level,
 			0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
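
The mtk_vcodec_enc.c hunk above replaces the 0 skip mask passed to v4l2_ctrl_new_std_menu() with a real one: in the V4L2 control framework a set bit in that mask hides the corresponding menu item, so inverting a mask of the supported profiles leaves exactly those profiles selectable. A small sketch of the same construction, assuming a control handler and ops that already exist in the caller:

    /*
     * Sketch only: one bit per menu item, a set bit means "hide this item",
     * so ~(supported items) exposes exactly the supported profiles.
     */
    #include <linux/bits.h>
    #include <media/v4l2-ctrls.h>

    static void register_h264_profile(struct v4l2_ctrl_handler *handler,
                                      const struct v4l2_ctrl_ops *ops)
    {
        const u64 skip_mask = ~(BIT_ULL(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
                                BIT_ULL(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
                                BIT_ULL(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH));

        v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
                               V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, skip_mask,
                               V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
    }

With a 0 mask every profile up to the maximum is advertised, which is why the hunk tightens it to the three profiles the encoder actually supports.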
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
index 4cc92700692b..955b2d0c8f53 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
@@ -471,14 +471,19 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
 	       sizeof(share_info->h264_slice_params));
 
 	fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
-	y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
-	vdec_fb_va = (unsigned long)fb;
+	if (!fb) {
+		err = -EBUSY;
+		mtk_vcodec_err(inst, "fb buffer is NULL");
+		goto vdec_dec_end;
+	}
+
+	vdec_fb_va = (unsigned long)fb;
+	y_fb_dma = (u64)fb->base_y.dma_addr;
 	if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
 		c_fb_dma = y_fb_dma +
 			inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
 	else
-		c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+		c_fb_dma = (u64)fb->base_c.dma_addr;
 
 	mtk_vcodec_debug(inst, "[h264-core] y/c addr = 0x%llx 0x%llx", y_fb_dma,
 			 c_fb_dma);
@@ -539,6 +544,29 @@ vdec_dec_end:
 	return 0;
 }
 
+static void vdec_h264_insert_startcode(struct mtk_vcodec_dev *vcodec_dev, unsigned char *buf,
+				       size_t *bs_size, struct mtk_h264_pps_param *pps)
+{
+	struct device *dev = &vcodec_dev->plat_dev->dev;
+
+	/* Need to add pending data at the end of bitstream when bs_sz is small than
+	 * 20 bytes for cavlc bitstream, or lat will decode fail. This pending data is
+	 * useful for mt8192 and mt8195 platform.
+	 *
+	 * cavlc bitstream when entropy_coding_mode_flag is false.
+	 */
+	if (pps->entropy_coding_mode_flag || *bs_size > 20 ||
+	    !(of_device_is_compatible(dev->of_node, "mediatek,mt8192-vcodec-dec") ||
+	      of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-dec")))
+		return;
+
+	buf[*bs_size] = 0;
+	buf[*bs_size + 1] = 0;
+	buf[*bs_size + 2] = 1;
+	buf[*bs_size + 3] = 0xff;
+	(*bs_size) += 4;
+}
+
 static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 				      struct vdec_fb *fb, bool *res_chg)
 {
@@ -582,9 +610,6 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 	}
 
 	inst->vsi->dec.nal_info = buf[nal_start_idx];
-	inst->vsi->dec.bs_buf_addr = (u64)bs->dma_addr;
-	inst->vsi->dec.bs_buf_size = bs->size;
-
 	lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
 	v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info,
 				   true);
@@ -592,6 +617,12 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 	if (err)
 		goto err_free_fb_out;
 
+	vdec_h264_insert_startcode(inst->ctx->dev, buf, &bs->size,
+				   &share_info->h264_slice_params.pps);
+
+	inst->vsi->dec.bs_buf_addr = (uint64_t)bs->dma_addr;
+	inst->vsi->dec.bs_buf_size = bs->size;
+
 	*res_chg = inst->resolution_changed;
 	if (inst->resolution_changed) {
 		mtk_vcodec_debug(inst, "- resolution changed -");
@@ -630,7 +661,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 	err = vpu_dec_start(vpu, data, 2);
 	if (err) {
 		mtk_vcodec_debug(inst, "lat decode err: %d", err);
-		goto err_scp_decode;
+		goto err_free_fb_out;
 	}
 
 	share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
@@ -647,12 +678,17 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 	/* wait decoder done interrupt */
 	timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
 					       WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
+	if (timeout)
+		mtk_vcodec_err(inst, "lat decode timeout: pic_%d", inst->slice_dec_num);
 	inst->vsi->dec.timeout = !!timeout;
 
 	err = vpu_dec_end(vpu);
-	if (err == SLICE_HEADER_FULL || timeout || err == TRANS_BUFFER_FULL) {
-		err = -EINVAL;
-		goto err_scp_decode;
+	if (err == SLICE_HEADER_FULL || err == TRANS_BUFFER_FULL) {
+		if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
+			vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+		inst->slice_dec_num++;
+		mtk_vcodec_err(inst, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
+		return -EINVAL;
 	}
 
 	share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
@@ -669,10 +705,6 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 
 	inst->slice_dec_num++;
 	return 0;
-
-err_scp_decode:
-	if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
-		vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
 err_free_fb_out:
 	vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
 	mtk_vcodec_err(inst, "slice dec number: %d err: %d", inst->slice_dec_num, err);
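
The new vdec_h264_insert_startcode() helper above pads a very short CAVLC bitstream with one more Annex-B start code plus a filler byte so the MT8192/MT8195 LAT decoder does not fail on it. A reduced sketch of just the padding step; the 20-byte threshold is taken from the hunk, and the caller is assumed to guarantee at least four spare bytes after the slice data:

    /*
     * Hedged sketch of the padding done by vdec_h264_insert_startcode()
     * above: append an Annex-B start code (00 00 01) and a filler byte,
     * then grow the reported bitstream size by four bytes.  The headroom
     * requirement is an assumption, not checked here.
     */
    #include <stddef.h>

    static void pad_short_bitstream(unsigned char *buf, size_t *bs_size)
    {
        if (*bs_size > 20)
            return;

        buf[*bs_size]     = 0x00;
        buf[*bs_size + 1] = 0x00;
        buf[*bs_size + 2] = 0x01;
        buf[*bs_size + 3] = 0xff;
        *bs_size += 4;
    }

Because the padding changes bs->size, the hunk also moves the bs_buf_addr/bs_buf_size assignments so the VSI is filled in only after the size is final.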
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
index fb1c36a3592d..cbb6728b8a40 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
@@ -2073,21 +2073,23 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 		return -EBUSY;
 	}
 	pfc = (struct vdec_vp9_slice_pfc *)lat_buf->private_data;
-	if (!pfc)
-		return -EINVAL;
+	if (!pfc) {
+		ret = -EINVAL;
+		goto err_free_fb_out;
+	}
 	vsi = &pfc->vsi;
 
 	ret = vdec_vp9_slice_setup_lat(instance, bs, lat_buf, pfc);
 	if (ret) {
 		mtk_vcodec_err(instance, "Failed to setup VP9 lat ret %d\n", ret);
-		return ret;
+		goto err_free_fb_out;
 	}
 	vdec_vp9_slice_vsi_to_remote(vsi, instance->vsi);
 
 	ret = vpu_dec_start(&instance->vpu, NULL, 0);
 	if (ret) {
 		mtk_vcodec_err(instance, "Failed to dec VP9 ret %d\n", ret);
-		return ret;
+		goto err_free_fb_out;
 	}
 
 	if (instance->irq) {
@@ -2107,7 +2109,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 	/* LAT trans full, no more UBE or decode timeout */
 	if (ret) {
 		mtk_vcodec_err(instance, "VP9 decode error: %d\n", ret);
-		return ret;
+		goto err_free_fb_out;
 	}
 
 	mtk_vcodec_debug(instance, "lat dma addr: 0x%lx 0x%lx\n",
@@ -2120,6 +2122,9 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
 	vdec_msg_queue_qbuf(&ctx->dev->msg_queue_core_ctx, lat_buf);
 
 	return 0;
+err_free_fb_out:
+	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+	return ret;
 }
 
 static int vdec_vp9_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
index ae500980ad45..dc2004790a47 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
@@ -221,7 +221,7 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
 	mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
 	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
 
-	if (!list_empty(&ctx->msg_queue.lat_ctx.ready_queue)) {
+	if (!list_empty(&dev->msg_queue_core_ctx.ready_queue)) {
 		mtk_v4l2_debug(3, "re-schedule to decode for core: %d",
 			       dev->msg_queue_core_ctx.ready_num);
 		queue_work(dev->core_workqueue, &msg_queue->core_work);
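
The vdec_vp9_req_lat_if.c hunks above route every failure after the lat buffer has been dequeued through a single err_free_fb_out label that puts the buffer back on the lat context, and the vdec_msg_queue.c hunk makes the core worker re-queue itself based on the core ready list rather than the lat one. A hedged sketch of the requeue-on-error shape only, with hypothetical hw_setup()/hw_run() standing in for the real setup and decode calls:

    /*
     * Hedged sketch of the requeue-on-error shape used by
     * vdec_vp9_slice_lat_decode() above.  hw_setup()/hw_run() are
     * hypothetical stand-ins, not functions from the driver.
     */
    struct vdec_lat_buf;
    struct vdec_msg_queue_ctx;

    int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *ctx, struct vdec_lat_buf *buf);
    int hw_setup(struct vdec_lat_buf *buf);    /* hypothetical */
    int hw_run(struct vdec_lat_buf *buf);      /* hypothetical */

    static int lat_decode_sketch(struct vdec_msg_queue_ctx *lat_ctx,
                                 struct vdec_lat_buf *lat_buf)
    {
        int ret;

        ret = hw_setup(lat_buf);
        if (ret)
            goto err_requeue;

        ret = hw_run(lat_buf);
        if (ret)
            goto err_requeue;

        return 0;                              /* buffer moves on to the core stage */

    err_requeue:
        vdec_msg_queue_qbuf(lat_ctx, lat_buf); /* give the lat buffer back */
        return ret;
    }

Returning the buffer on every error path keeps the lat context's pool from shrinking, so later frames can still be scheduled after a failed slice.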