Diffstat (limited to 'drivers/media/platform')
110 files changed, 2297 insertions, 6511 deletions
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index a505e9f5a1e2..4acbed189644 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -141,7 +141,6 @@ config VIDEO_RENESAS_CEU ---help--- This is a v4l2 driver for the Renesas CEU Interface -source "drivers/media/platform/soc_camera/Kconfig" source "drivers/media/platform/exynos4-is/Kconfig" source "drivers/media/platform/am437x/Kconfig" source "drivers/media/platform/xilinx/Kconfig" @@ -650,7 +649,7 @@ config VIDEO_SECO_CEC config VIDEO_SECO_RC bool "SECO Boards IR RC5 support" depends on VIDEO_SECO_CEC - select RC_CORE + depends on RC_CORE help If you say yes here you will get support for the SECO Boards Consumer-IR in seco-cec driver. @@ -669,7 +668,7 @@ menuconfig SDR_PLATFORM_DRIVERS if SDR_PLATFORM_DRIVERS config VIDEO_RCAR_DRIF - tristate "Renesas Digitial Radio Interface (DRIF)" + tristate "Renesas Digital Radio Interface (DRIF)" depends on VIDEO_V4L2 depends on ARCH_RENESAS || COMPILE_TEST select VIDEOBUF2_VMALLOC diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index e6deb2597738..7cbbd925124c 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -62,8 +62,6 @@ obj-y += davinci/ obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o -obj-$(CONFIG_SOC_CAMERA) += soc_camera/ - obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index dfec813f50a9..692e08ef38c0 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -1661,6 +1661,7 @@ static int aspeed_video_probe(struct platform_device *pdev) video->frame_rate = 30; video->dev = &pdev->dev; + spin_lock_init(&video->lock); mutex_init(&video->video_lock); init_waitqueue_head(&video->wait); INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work); diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c index fdb255e4a956..08b8d5583080 100644 --- a/drivers/media/platform/atmel/atmel-isi.c +++ b/drivers/media/platform/atmel/atmel-isi.c @@ -110,7 +110,7 @@ struct atmel_isi { bool enable_preview_path; struct completion complete; - /* ISI peripherial clock */ + /* ISI peripheral clock */ struct clk *pclk; unsigned int irq; @@ -1078,7 +1078,7 @@ static void isi_graph_notify_unbind(struct v4l2_async_notifier *notifier, dev_dbg(isi->dev, "Removing %s\n", video_device_node_name(isi->vdev)); - /* Checks internaly if vdev have been init or not */ + /* Checks internally if vdev have been init or not */ video_unregister_device(isi->vdev); } diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index 8e0194993a52..b4f396c2e72c 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -1010,7 +1010,11 @@ static int coda_start_encoding(struct coda_ctx *ctx) CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) | ((ctx->params.h264_slice_beta_offset_div2 & CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) << - CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET); + CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET) | + (ctx->params.h264_constrained_intra_pred_flag << + CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET) | + (ctx->params.h264_chroma_qp_index_offset & + CODA_264PARAM_CHROMAQPOFFSET_MASK); coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA); break; case V4L2_PIX_FMT_JPEG: @@ 
-2020,7 +2024,6 @@ static void coda_finish_decode(struct coda_ctx *ctx) struct coda_q_data *q_data_dst; struct vb2_v4l2_buffer *dst_buf; struct coda_buffer_meta *meta; - unsigned long payload; int width, height; int decoded_idx; int display_idx; @@ -2226,21 +2229,8 @@ static void coda_finish_decode(struct coda_ctx *ctx) trace_coda_dec_rot_done(ctx, dst_buf, meta); - switch (q_data_dst->fourcc) { - case V4L2_PIX_FMT_YUYV: - payload = width * height * 2; - break; - case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_YVU420: - case V4L2_PIX_FMT_NV12: - default: - payload = width * height * 3 / 2; - break; - case V4L2_PIX_FMT_YUV422P: - payload = width * height * 2; - break; - } - vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload); + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, + q_data_dst->sizeimage); if (ctx->frame_errors[ctx->display_idx] || err_vdoa) coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR); diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 7518f01c48f7..fa0b22fb7991 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -728,7 +728,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f, ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; break; case V4L2_PIX_FMT_NV12: - if (!disable_tiling) { + if (!disable_tiling && ctx->dev->devtype->product == CODA_960) { ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; break; } @@ -1839,6 +1839,12 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: ctx->params.h264_disable_deblocking_filter_idc = ctrl->val; break; + case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION: + ctx->params.h264_constrained_intra_pred_flag = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: + ctx->params.h264_chroma_qp_index_offset = ctrl->val; + break; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: /* TODO: switch between baseline and constrained baseline */ if (ctx->inst_type == CODA_INST_ENCODER) @@ -1925,6 +1931,11 @@ static void coda_encode_ctrls(struct coda_ctx *ctx) V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY, 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); + v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION, 0, 1, 1, + 0); + v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0); v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0, diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c index 9f899a6cefed..39a2351c1e48 100644 --- a/drivers/media/platform/coda/coda-jpeg.c +++ b/drivers/media/platform/coda/coda-jpeg.c @@ -103,7 +103,7 @@ static const unsigned char chroma_ac_value[162 + 2] = { /* * Quantization tables for luminance and chrominance components in - * zig-zag scan order from the Freescale i.MX VPU libaries + * zig-zag scan order from the Freescale i.MX VPU libraries */ static unsigned char luma_q[64] = { diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h index 31cea72f5b2a..31c80bda2c0b 100644 --- a/drivers/media/platform/coda/coda.h +++ b/drivers/media/platform/coda/coda.h @@ -118,6 +118,8 @@ struct coda_params { u8 h264_disable_deblocking_filter_idc; s8 h264_slice_alpha_c0_offset_div2; s8 h264_slice_beta_offset_div2; + bool 
h264_constrained_intra_pred_flag; + s8 h264_chroma_qp_index_offset; u8 h264_profile_idc; u8 h264_level_idc; u8 mpeg4_intra_qp; diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c index 340f8218f54d..47cecd10eb9f 100644 --- a/drivers/media/platform/davinci/isif.c +++ b/drivers/media/platform/davinci/isif.c @@ -328,7 +328,7 @@ static void isif_config_bclamp(struct isif_black_clamp *bc) if (bc->en) { val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT; - /* Enable BC and horizontal clamp caculation paramaters */ + /* Enable BC and horizontal clamp calculation parameters */ val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT); regw(val, CLAMPCFG); @@ -358,7 +358,7 @@ static void isif_config_bclamp(struct isif_black_clamp *bc) regw(bc->horz.win_start_v_calc, CLHWIN2); } - /* vertical clamp caculation paramaters */ + /* vertical clamp calculation parameters */ /* Reset clamp value sel for previous line */ val |= diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c index 4766a7a23d16..8339163a5231 100644 --- a/drivers/media/platform/davinci/vpbe.c +++ b/drivers/media/platform/davinci/vpbe.c @@ -242,7 +242,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index) goto unlock; /* - * It is assumed that venc or extenal encoder will set a default + * It is assumed that venc or external encoder will set a default * mode in the sub device. For external encoder or LCD pannel output, * we also need to set up the lcd port for the required mode. So setup * the lcd port for the default mode that is configured in the board diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index 9996bab98fe3..26dadbba930f 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -518,7 +518,7 @@ static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev) static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev) { - v4l2_get_timestamp(&vpfe_dev->cur_frm->ts); + vpfe_dev->cur_frm->ts = ktime_get_ns(); vpfe_dev->cur_frm->state = VIDEOBUF_DONE; vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage; wake_up_interruptible(&vpfe_dev->cur_frm->done); diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c index 16352e2263d2..df66461f5d4f 100644 --- a/drivers/media/platform/davinci/vpif.c +++ b/drivers/media/platform/davinci/vpif.c @@ -1,7 +1,7 @@ /* * vpif - Video Port Interface driver * VPIF is a receiver and transmitter for video data. It has two channels(0, 1) - * that receiveing video byte stream and two channels(2, 3) for video output. + * that receiving video byte stream and two channels(2, 3) for video output. * The hardware supports SDTV, HDTV formats, raw data capture. * Currently, the driver supports NTSC and PAL standards. 
* diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 3517487d9760..f4b4f2a1dfc0 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -138,7 +138,7 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq, * vpif_buffer_queue : Callback function to add buffer to DMA queue * @vb: ptr to vb2_buffer * - * This callback fucntion queues the buffer to DMA engine + * This callback function queues the buffer to DMA engine */ static void vpif_buffer_queue(struct vb2_buffer *vb) { @@ -635,7 +635,7 @@ static int vpif_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; /* - * to supress v4l-compliance warnings silently correct + * to suppress v4l-compliance warnings silently correct * the pixelformat */ if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) diff --git a/drivers/media/platform/exynos4-is/fimc-is-command.h b/drivers/media/platform/exynos4-is/fimc-is-command.h index 0d1f52e394b1..b06b56b890d5 100644 --- a/drivers/media/platform/exynos4-is/fimc-is-command.h +++ b/drivers/media/platform/exynos4-is/fimc-is-command.h @@ -18,7 +18,7 @@ #define FIMC_IS_COMMAND_VER 110 /* FIMC-IS command set version 1.10 */ -/* Enumeration of commands beetween the FIMC-IS and the host processor. */ +/* Enumeration of commands between the FIMC-IS and the host processor. */ /* HOST to FIMC-IS */ #define HIC_PREVIEW_STILL 0x0001 diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.h b/drivers/media/platform/exynos4-is/fimc-is-param.h index 8e31f7642776..22923a3d405e 100644 --- a/drivers/media/platform/exynos4-is/fimc-is-param.h +++ b/drivers/media/platform/exynos4-is/fimc-is-param.h @@ -298,7 +298,7 @@ enum isp_af_mode { #define ISP_FLASH_COMMAND_AUTO 2 #define ISP_FLASH_COMMAND_TORCH 3 /* 3 sec */ -/* Flash red-eye commads */ +/* Flash red-eye commands */ #define ISP_FLASH_REDEYE_DISABLE 0 #define ISP_FLASH_REDEYE_ENABLE 1 diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index f5fc54de19da..02da0b06e56a 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -738,7 +738,7 @@ int fimc_is_hw_initialize(struct fimc_is *is) return 0; } -static int fimc_is_log_show(struct seq_file *s, void *data) +static int fimc_is_show(struct seq_file *s, void *data) { struct fimc_is *is = s->private; const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET; @@ -752,17 +752,7 @@ static int fimc_is_log_show(struct seq_file *s, void *data) return 0; } -static int fimc_is_debugfs_open(struct inode *inode, struct file *file) -{ - return single_open(file, fimc_is_log_show, inode->i_private); -} - -static const struct file_operations fimc_is_debugfs_fops = { - .open = fimc_is_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(fimc_is); static void fimc_is_debugfs_remove(struct fimc_is *is) { @@ -777,7 +767,7 @@ static int fimc_is_debugfs_create(struct fimc_is *is) is->debugfs_entry = debugfs_create_dir("fimc_is", NULL); dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry, - is, &fimc_is_debugfs_fops); + is, &fimc_is_fops); if (!dentry) fimc_is_debugfs_remove(is); diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c index de6bd28f7e31..bb35a2017f21 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c +++ 
b/drivers/media/platform/exynos4-is/fimc-isp-video.c @@ -606,9 +606,7 @@ int fimc_isp_video_device_register(struct fimc_isp *isp, vdev = &iv->ve.vdev; memset(vdev, 0, sizeof(*vdev)); - snprintf(vdev->name, sizeof(vdev->name), "fimc-is-isp.%s", - type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? - "capture" : "output"); + strscpy(vdev->name, "fimc-is-isp.capture", sizeof(vdev->name)); vdev->queue = q; vdev->fops = &isp_video_fops; vdev->ioctl_ops = &isp_video_ioctl_ops; diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h index 9f527670395a..a7c9490bbcb4 100644 --- a/drivers/media/platform/exynos4-is/media-dev.h +++ b/drivers/media/platform/exynos4-is/media-dev.h @@ -79,7 +79,7 @@ struct fimc_camclk_info { /** * struct fimc_sensor_info - image data source subdev information - * @pdata: sensor's atrributes passed as media device's platform data + * @pdata: sensor's attributes passed as media device's platform data * @asd: asynchronous subdev registration data structure * @subdev: image sensor v4l2 subdev * @host: fimc device the sensor is currently linked to diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index 35cb0162085b..234e047e3e8f 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -183,7 +183,7 @@ struct csis_drvdata { * @index: the hardware instance index * @pdev: CSIS platform device * @phy: pointer to the CSIS generic PHY - * @regs: mmaped I/O registers memory + * @regs: mmapped I/O registers memory * @supplies: CSIS regulator supplies * @clock: CSIS clocks * @irq: requested s5p-mipi-csis irq number @@ -745,7 +745,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, goto err; } - /* Get MIPI CSI-2 bus configration from the endpoint node. */ + /* Get MIPI CSI-2 bus configuration from the endpoint node. 
*/ of_property_read_u32(node, "samsung,csis-hs-settle", &state->hs_settle); state->wclk_ext = of_property_read_bool(node, diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c index ca6d0317ab42..cffebcaacb90 100644 --- a/drivers/media/platform/fsl-viu.c +++ b/drivers/media/platform/fsl-viu.c @@ -1090,7 +1090,7 @@ static void viu_capture_intr(struct viu_dev *dev, u32 status) if (waitqueue_active(&buf->vb.done)) { list_del(&buf->vb.queue); - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; wake_up(&buf->vb.done); diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c index c1c255408d16..0bcfc5aa8f3d 100644 --- a/drivers/media/platform/imx-pxp.c +++ b/drivers/media/platform/imx-pxp.c @@ -90,7 +90,11 @@ static struct pxp_fmt formats[] = { .depth = 16, .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { - .fourcc = V4L2_PIX_FMT_YUV32, + .fourcc = V4L2_PIX_FMT_VUYA32, + .depth = 32, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_VUYX32, .depth = 32, .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { @@ -236,7 +240,7 @@ static u32 pxp_v4l2_pix_fmt_to_ps_format(u32 v4l2_pix_fmt) case V4L2_PIX_FMT_RGB555: return BV_PXP_PS_CTRL_FORMAT__RGB555; case V4L2_PIX_FMT_RGB444: return BV_PXP_PS_CTRL_FORMAT__RGB444; case V4L2_PIX_FMT_RGB565: return BV_PXP_PS_CTRL_FORMAT__RGB565; - case V4L2_PIX_FMT_YUV32: return BV_PXP_PS_CTRL_FORMAT__YUV1P444; + case V4L2_PIX_FMT_VUYX32: return BV_PXP_PS_CTRL_FORMAT__YUV1P444; case V4L2_PIX_FMT_UYVY: return BV_PXP_PS_CTRL_FORMAT__UYVY1P422; case V4L2_PIX_FMT_YUYV: return BM_PXP_PS_CTRL_WB_SWAP | BV_PXP_PS_CTRL_FORMAT__UYVY1P422; @@ -265,7 +269,8 @@ static u32 pxp_v4l2_pix_fmt_to_out_format(u32 v4l2_pix_fmt) case V4L2_PIX_FMT_RGB555: return BV_PXP_OUT_CTRL_FORMAT__RGB555; case V4L2_PIX_FMT_RGB444: return BV_PXP_OUT_CTRL_FORMAT__RGB444; case V4L2_PIX_FMT_RGB565: return BV_PXP_OUT_CTRL_FORMAT__RGB565; - case V4L2_PIX_FMT_YUV32: return BV_PXP_OUT_CTRL_FORMAT__YUV1P444; + case V4L2_PIX_FMT_VUYA32: + case V4L2_PIX_FMT_VUYX32: return BV_PXP_OUT_CTRL_FORMAT__YUV1P444; case V4L2_PIX_FMT_UYVY: return BV_PXP_OUT_CTRL_FORMAT__UYVY1P422; case V4L2_PIX_FMT_VYUY: return BV_PXP_OUT_CTRL_FORMAT__VYUY1P422; case V4L2_PIX_FMT_GREY: return BV_PXP_OUT_CTRL_FORMAT__Y8; @@ -281,7 +286,8 @@ static u32 pxp_v4l2_pix_fmt_to_out_format(u32 v4l2_pix_fmt) static bool pxp_v4l2_pix_fmt_is_yuv(u32 v4l2_pix_fmt) { switch (v4l2_pix_fmt) { - case V4L2_PIX_FMT_YUV32: + case V4L2_PIX_FMT_VUYA32: + case V4L2_PIX_FMT_VUYX32: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_VYUY: @@ -680,7 +686,7 @@ static void pxp_setup_csc(struct pxp_ctx *ctx) csc2_coef = csc2_coef_rec709_full; else csc2_coef = csc2_coef_rec709_lim; - } else if (ycbcr_enc == V4L2_YCBCR_ENC_709) { + } else if (ycbcr_enc == V4L2_YCBCR_ENC_BT2020) { if (quantization == V4L2_QUANTIZATION_FULL_RANGE) csc2_coef = csc2_coef_bt2020_full; else diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index 70a2833db0d1..af76eb637773 100644 --- a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c @@ -240,8 +240,8 @@ static void mmpcam_calc_dphy(struct mcam_camera *mcam) * bit 8 ~ bit 15: HS_SETTLE * Time interval during which the HS * receiver shall ignore any Data Lane - * HS transistions. - * The vaule has been calibrated on + * HS transitions. + * The value has been calibrated on * different boards. 
It seems to work well. * * More detail please refer diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c index 2a5d5002c27e..f761e4d8bf2a 100644 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c @@ -702,7 +702,7 @@ end: v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb)); } -static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, +static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, enum v4l2_buf_type type) { if (V4L2_TYPE_IS_OUTPUT(type)) @@ -714,7 +714,7 @@ static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) { struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; int ret = 0; ret = pm_runtime_get_sync(ctx->jpeg->dev); @@ -724,14 +724,14 @@ static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) return 0; err: while ((vb = mtk_jpeg_buf_remove(ctx, q->type))) - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED); + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED); return ret; } static void mtk_jpeg_stop_streaming(struct vb2_queue *q) { struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; /* * STREAMOFF is an acknowledgment for source change event. @@ -743,7 +743,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q) struct mtk_jpeg_src_buf *src_buf; vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - src_buf = mtk_jpeg_vb2_to_srcbuf(vb); + src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf); mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param); ctx->state = MTK_JPEG_RUNNING; } else if (V4L2_TYPE_IS_OUTPUT(q->type)) { @@ -751,7 +751,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q) } while ((vb = mtk_jpeg_buf_remove(ctx, q->type))) - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR); pm_runtime_put_sync(ctx->jpeg->dev); } @@ -807,7 +807,7 @@ static void mtk_jpeg_device_run(void *priv) { struct mtk_jpeg_ctx *ctx = priv; struct mtk_jpeg_dev *jpeg = ctx->jpeg; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; unsigned long flags; struct mtk_jpeg_src_buf *jpeg_src_buf; @@ -817,11 +817,11 @@ static void mtk_jpeg_device_run(void *priv) src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf); + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf); if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) { - for (i = 0; i < dst_buf->num_planes; i++) - vb2_set_plane_payload(dst_buf, i, 0); + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++) + vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0); buf_state = VB2_BUF_STATE_DONE; goto dec_end; } @@ -833,8 +833,8 @@ static void mtk_jpeg_device_run(void *priv) return; } - mtk_jpeg_set_dec_src(ctx, src_buf, &bs); - if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb)) + mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs); + if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb)) goto dec_end; spin_lock_irqsave(&jpeg->hw_lock, flags); @@ -849,8 +849,8 @@ static void mtk_jpeg_device_run(void *priv) dec_end: v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - 
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state); + v4l2_m2m_buf_done(src_buf, buf_state); + v4l2_m2m_buf_done(dst_buf, buf_state); v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); } @@ -921,7 +921,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) { struct mtk_jpeg_dev *jpeg = priv; struct mtk_jpeg_ctx *ctx; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct mtk_jpeg_src_buf *jpeg_src_buf; enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; u32 dec_irq_ret; @@ -938,7 +938,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf); + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf); if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW) mtk_jpeg_dec_reset(jpeg->dec_reg_base); @@ -948,15 +948,15 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) goto dec_end; } - for (i = 0; i < dst_buf->num_planes; i++) - vb2_set_plane_payload(dst_buf, i, + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++) + vb2_set_plane_payload(&dst_buf->vb2_buf, i, jpeg_src_buf->dec_param.comp_size[i]); buf_state = VB2_BUF_STATE_DONE; dec_end: - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state); + v4l2_m2m_buf_done(src_buf, buf_state); + v4l2_m2m_buf_done(dst_buf, buf_state); v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); return IRQ_HANDLED; } diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h index ad1cff306efd..e5abb1abb3a3 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h @@ -41,7 +41,7 @@ #define MTK_MDP_CTX_ERROR BIT(5) /** - * struct mtk_mdp_pix_align - alignement of image + * struct mtk_mdp_pix_align - alignment of image * @org_w: source alignment of width * @org_h: source alignment of height * @target_w: dst alignment of width @@ -122,8 +122,8 @@ struct mtk_mdp_frame { /** * struct mtk_mdp_variant - image processor variant information * @pix_max: maximum limit of image size - * @pix_min: minimun limit of image size - * @pix_align: alignement of image + * @pix_min: minimum limit of image size + * @pix_align: alignment of image * @h_scale_up_max: maximum scale-up in horizontal * @v_scale_up_max: maximum scale-up in vertical * @h_scale_down_max: maximum scale-down in horizontal diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c index 51a13466261e..7d15c06e9db9 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c @@ -473,20 +473,17 @@ static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx, static void mtk_mdp_m2m_get_bufs(struct mtk_mdp_ctx *ctx) { struct mtk_mdp_frame *s_frame, *d_frame; - struct vb2_buffer *src_vb, *dst_vb; struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf; s_frame = &ctx->s_frame; d_frame = &ctx->d_frame; - src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx); - mtk_mdp_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr); + src_vbuf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); + mtk_mdp_prepare_addr(ctx, &src_vbuf->vb2_buf, s_frame, &s_frame->addr); - dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); - mtk_mdp_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr); + dst_vbuf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); + 
mtk_mdp_prepare_addr(ctx, &dst_vbuf->vb2_buf, d_frame, &d_frame->addr); - src_vbuf = to_vb2_v4l2_buffer(src_vb); - dst_vbuf = to_vb2_v4l2_buffer(dst_vb); dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp; } @@ -494,17 +491,14 @@ static void mtk_mdp_process_done(void *priv, int vb_state) { struct mtk_mdp_dev *mdp = priv; struct mtk_mdp_ctx *ctx; - struct vb2_buffer *src_vb, *dst_vb; - struct vb2_v4l2_buffer *src_vbuf = NULL, *dst_vbuf = NULL; + struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf; ctx = v4l2_m2m_get_curr_priv(mdp->m2m_dev); if (!ctx) return; - src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); - src_vbuf = to_vb2_v4l2_buffer(src_vb); - dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); - dst_vbuf = to_vb2_v4l2_buffer(dst_vb); + src_vbuf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + dst_vbuf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp; dst_vbuf->timecode = src_vbuf->timecode; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c index ba619647bc10..d022c65bb34c 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c @@ -325,13 +325,12 @@ static void mtk_vdec_worker(struct work_struct *work) struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx, decode_work); struct mtk_vcodec_dev *dev = ctx->dev; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct mtk_vcodec_mem buf; struct vdec_fb *pfb; bool res_chg = false; int ret; struct mtk_video_dec_buf *dst_buf_info, *src_buf_info; - struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2; src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); if (src_buf == NULL) { @@ -347,26 +346,23 @@ static void mtk_vdec_worker(struct work_struct *work) return; } - src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf); - src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb); - - dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf); - dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb); + src_buf_info = container_of(src_buf, struct mtk_video_dec_buf, vb); + dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf, vb); pfb = &dst_buf_info->frame_buffer; - pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0); - pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); + pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz; - pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1); - pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1); + pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1); + pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1); pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz; pfb->status = 0; mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id); mtk_v4l2_debug(3, "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx", - dst_buf->index, pfb, + dst_buf->vb2_buf.index, pfb, pfb->base_y.va, &pfb->base_y.dma_addr, &pfb->base_c.dma_addr, pfb->base_y.size); @@ -384,19 +380,19 @@ static void mtk_vdec_worker(struct work_struct *work) clean_display_buffer(ctx); vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0); vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0); - dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST; + dst_buf->flags |= V4L2_BUF_FLAG_LAST; 
v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE); clean_free_buffer(ctx); v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); return; } - buf.va = vb2_plane_vaddr(src_buf, 0); - buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); + buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0); + buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); buf.size = (size_t)src_buf->planes[0].bytesused; if (!buf.va) { v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); mtk_v4l2_err("[%d] id=%d src_addr is NULL!!", - ctx->id, src_buf->index); + ctx->id, src_buf->vb2_buf.index); return; } mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p", @@ -416,10 +412,10 @@ static void mtk_vdec_worker(struct work_struct *work) mtk_v4l2_err( " <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>", ctx->id, - src_buf->index, + src_buf->vb2_buf.index, buf.size, src_buf_info->vb.vb2_buf.timestamp, - dst_buf->index, + dst_buf->vb2_buf.index, ret, res_chg); src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); if (ret == -EIO) { @@ -1103,7 +1099,7 @@ static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb) static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) { - struct vb2_buffer *src_buf; + struct vb2_v4l2_buffer *src_buf; struct mtk_vcodec_mem src_mem; bool res_chg = false; int ret = 0; @@ -1149,8 +1145,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) mtk_v4l2_err("No src buffer"); return; } - vb2_v4l2 = to_vb2_v4l2_buffer(src_buf); - buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb); + buf = container_of(src_buf, struct mtk_video_dec_buf, vb); if (buf->lastframe) { /* This shouldn't happen. Just in case. */ mtk_v4l2_err("Invalid flush buffer."); @@ -1158,8 +1153,8 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) return; } - src_mem.va = vb2_plane_vaddr(src_buf, 0); - src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); + src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0); + src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); src_mem.size = (size_t)src_buf->planes[0].bytesused; mtk_v4l2_debug(2, "[%d] buf id=%d va=%p dma=%pad size=%zx", @@ -1170,7 +1165,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg); if (ret || !res_chg) { /* - * fb == NULL menas to parse SPS/PPS header or + * fb == NULL means to parse SPS/PPS header or * resolution info in src_mem. Decode can fail * if there is no SPS header or picture info * in bs @@ -1181,11 +1176,9 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.", ctx->id); ctx->state = MTK_STATE_ABORT; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); } else { - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); } mtk_v4l2_debug(ret ? 
0 : 1, "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d", @@ -1281,7 +1274,7 @@ static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count) static void vb2ops_vdec_stop_streaming(struct vb2_queue *q) { - struct vb2_buffer *src_buf = NULL, *dst_buf = NULL; + struct vb2_v4l2_buffer *src_buf = NULL, *dst_buf = NULL; struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q); mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d", @@ -1289,12 +1282,10 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q) if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) { - struct vb2_v4l2_buffer *vb2_v4l2 = - to_vb2_v4l2_buffer(src_buf); struct mtk_video_dec_buf *buf_info = container_of( - vb2_v4l2, struct mtk_video_dec_buf, vb); + src_buf, struct mtk_video_dec_buf, vb); if (!buf_info->lastframe) - v4l2_m2m_buf_done(vb2_v4l2, + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); } return; @@ -1323,10 +1314,9 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q) ctx->state = MTK_STATE_FLUSH; while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) { - vb2_set_plane_payload(dst_buf, 0, 0); - vb2_set_plane_payload(dst_buf, 1, 0); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_ERROR); + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); + vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); } } diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c index 79ca03ac449c..7884465afcd2 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c @@ -27,11 +27,14 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev) struct device_node *node; struct platform_device *pdev; struct mtk_vcodec_pm *pm; - int ret = 0; + struct mtk_vcodec_clk *dec_clk; + struct mtk_vcodec_clk_info *clk_info; + int i = 0, ret = 0; pdev = mtkdev->plat_dev; pm = &mtkdev->pm; pm->mtkdev = mtkdev; + dec_clk = &pm->vdec_clk; node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0); if (!node) { mtk_v4l2_err("of_parse_phandle mediatek,larb fail!"); @@ -47,52 +50,34 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev) pdev = mtkdev->plat_dev; pm->dev = &pdev->dev; - pm->vcodecpll = devm_clk_get(&pdev->dev, "vcodecpll"); - if (IS_ERR(pm->vcodecpll)) { - mtk_v4l2_err("devm_clk_get vcodecpll fail"); - ret = PTR_ERR(pm->vcodecpll); + dec_clk->clk_num = + of_property_count_strings(pdev->dev.of_node, "clock-names"); + if (dec_clk->clk_num > 0) { + dec_clk->clk_info = devm_kcalloc(&pdev->dev, + dec_clk->clk_num, sizeof(*clk_info), + GFP_KERNEL); + if (!dec_clk->clk_info) + return -ENOMEM; + } else { + mtk_v4l2_err("Failed to get vdec clock count"); + return -EINVAL; } - pm->univpll_d2 = devm_clk_get(&pdev->dev, "univpll_d2"); - if (IS_ERR(pm->univpll_d2)) { - mtk_v4l2_err("devm_clk_get univpll_d2 fail"); - ret = PTR_ERR(pm->univpll_d2); - } - - pm->clk_cci400_sel = devm_clk_get(&pdev->dev, "clk_cci400_sel"); - if (IS_ERR(pm->clk_cci400_sel)) { - mtk_v4l2_err("devm_clk_get clk_cci400_sel fail"); - ret = PTR_ERR(pm->clk_cci400_sel); - } - - pm->vdec_sel = devm_clk_get(&pdev->dev, "vdec_sel"); - if (IS_ERR(pm->vdec_sel)) { - mtk_v4l2_err("devm_clk_get vdec_sel fail"); - ret = PTR_ERR(pm->vdec_sel); - } - - pm->vdecpll = devm_clk_get(&pdev->dev, "vdecpll"); - if (IS_ERR(pm->vdecpll)) { - mtk_v4l2_err("devm_clk_get vdecpll fail"); - ret = 
PTR_ERR(pm->vdecpll); - } - - pm->vencpll = devm_clk_get(&pdev->dev, "vencpll"); - if (IS_ERR(pm->vencpll)) { - mtk_v4l2_err("devm_clk_get vencpll fail"); - ret = PTR_ERR(pm->vencpll); - } - - pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel"); - if (IS_ERR(pm->venc_lt_sel)) { - mtk_v4l2_err("devm_clk_get venc_lt_sel fail"); - ret = PTR_ERR(pm->venc_lt_sel); - } - - pm->vdec_bus_clk_src = devm_clk_get(&pdev->dev, "vdec_bus_clk_src"); - if (IS_ERR(pm->vdec_bus_clk_src)) { - mtk_v4l2_err("devm_clk_get vdec_bus_clk_src"); - ret = PTR_ERR(pm->vdec_bus_clk_src); + for (i = 0; i < dec_clk->clk_num; i++) { + clk_info = &dec_clk->clk_info[i]; + ret = of_property_read_string_index(pdev->dev.of_node, + "clock-names", i, &clk_info->clk_name); + if (ret) { + mtk_v4l2_err("Failed to get clock name id = %d", i); + return ret; + } + clk_info->vcodec_clk = devm_clk_get(&pdev->dev, + clk_info->clk_name); + if (IS_ERR(clk_info->vcodec_clk)) { + mtk_v4l2_err("devm_clk_get (%d)%s fail", i, + clk_info->clk_name); + return PTR_ERR(clk_info->vcodec_clk); + } } pm_runtime_enable(&pdev->dev); @@ -125,78 +110,36 @@ void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm) void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm) { - int ret; - - ret = clk_set_rate(pm->vcodecpll, 1482 * 1000000); - if (ret) - mtk_v4l2_err("clk_set_rate vcodecpll fail %d", ret); - - ret = clk_set_rate(pm->vencpll, 800 * 1000000); - if (ret) - mtk_v4l2_err("clk_set_rate vencpll fail %d", ret); - - ret = clk_prepare_enable(pm->vcodecpll); - if (ret) - mtk_v4l2_err("clk_prepare_enable vcodecpll fail %d", ret); - - ret = clk_prepare_enable(pm->vencpll); - if (ret) - mtk_v4l2_err("clk_prepare_enable vencpll fail %d", ret); - - ret = clk_prepare_enable(pm->vdec_bus_clk_src); - if (ret) - mtk_v4l2_err("clk_prepare_enable vdec_bus_clk_src fail %d", - ret); - - ret = clk_prepare_enable(pm->venc_lt_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret); - - ret = clk_set_parent(pm->venc_lt_sel, pm->vdec_bus_clk_src); - if (ret) - mtk_v4l2_err("clk_set_parent venc_lt_sel vdec_bus_clk_src fail %d", - ret); - - ret = clk_prepare_enable(pm->univpll_d2); - if (ret) - mtk_v4l2_err("clk_prepare_enable univpll_d2 fail %d", ret); - - ret = clk_prepare_enable(pm->clk_cci400_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable clk_cci400_sel fail %d", ret); - - ret = clk_set_parent(pm->clk_cci400_sel, pm->univpll_d2); - if (ret) - mtk_v4l2_err("clk_set_parent clk_cci400_sel univpll_d2 fail %d", - ret); - - ret = clk_prepare_enable(pm->vdecpll); - if (ret) - mtk_v4l2_err("clk_prepare_enable vdecpll fail %d", ret); - - ret = clk_prepare_enable(pm->vdec_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable vdec_sel fail %d", ret); - - ret = clk_set_parent(pm->vdec_sel, pm->vdecpll); - if (ret) - mtk_v4l2_err("clk_set_parent vdec_sel vdecpll fail %d", ret); + struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk; + int ret, i = 0; + + for (i = 0; i < dec_clk->clk_num; i++) { + ret = clk_prepare_enable(dec_clk->clk_info[i].vcodec_clk); + if (ret) { + mtk_v4l2_err("clk_prepare_enable %d %s fail %d", i, + dec_clk->clk_info[i].clk_name, ret); + goto error; + } + } ret = mtk_smi_larb_get(pm->larbvdec); - if (ret) + if (ret) { mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret); + goto error; + } + return; +error: + for (i -= 1; i >= 0; i--) + clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk); } void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm) { + struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk; + int i = 0; + 
mtk_smi_larb_put(pm->larbvdec); - clk_disable_unprepare(pm->vdec_sel); - clk_disable_unprepare(pm->vdecpll); - clk_disable_unprepare(pm->univpll_d2); - clk_disable_unprepare(pm->clk_cci400_sel); - clk_disable_unprepare(pm->venc_lt_sel); - clk_disable_unprepare(pm->vdec_bus_clk_src); - clk_disable_unprepare(pm->vencpll); - clk_disable_unprepare(pm->vcodecpll); + for (i = dec_clk->clk_num - 1; i >= 0; i--) + clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk); } diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h index 3cffb381ac8e..e7e2a108def9 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h @@ -151,9 +151,9 @@ struct mtk_q_data { * @intra_period: I frame period * @gop_size: group of picture size, it's used as the intra frame period * @framerate_num: frame rate numerator. ex: framerate_num=30 and - * framerate_denom=1 menas FPS is 30 + * framerate_denom=1 means FPS is 30 * @framerate_denom: frame rate denominator. ex: framerate_num=30 and - * framerate_denom=1 menas FPS is 30 + * framerate_denom=1 means FPS is 30 * @h264_max_qp: Max value for H.264 quantization parameter * @h264_profile: V4L2 defined H.264 profile * @h264_level: V4L2 defined H.264 level @@ -176,22 +176,29 @@ struct mtk_enc_params { }; /** + * struct mtk_vcodec_clk_info - Structure used to store clock name + */ +struct mtk_vcodec_clk_info { + const char *clk_name; + struct clk *vcodec_clk; +}; + +/** + * struct mtk_vcodec_clk - Structure used to store vcodec clock information + */ +struct mtk_vcodec_clk { + struct mtk_vcodec_clk_info *clk_info; + int clk_num; +}; + +/** * struct mtk_vcodec_pm - Power management data structure */ struct mtk_vcodec_pm { - struct clk *vdec_bus_clk_src; - struct clk *vencpll; - - struct clk *vcodecpll; - struct clk *univpll_d2; - struct clk *clk_cci400_sel; - struct clk *vdecpll; - struct clk *vdec_sel; - struct clk *vencpll_d2; - struct clk *venc_sel; - struct clk *univpll1_d2; - struct clk *venc_lt_sel; + struct mtk_vcodec_clk vdec_clk; struct device *larbvdec; + + struct mtk_vcodec_clk venc_clk; struct device *larbvenc; struct device *larbvenclt; struct device *dev; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c index d1f12257bf66..c6b48b5925fb 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c @@ -393,7 +393,7 @@ static void mtk_venc_set_param(struct mtk_vcodec_ctx *ctx, param->input_yuv_fmt = VENC_YUV_FORMAT_NV21; break; default: - mtk_v4l2_err("Unsupport fourcc =%d", q_data_src->fmt->fourcc); + mtk_v4l2_err("Unsupported fourcc =%d", q_data_src->fmt->fourcc); break; } param->h264_profile = enc_params->h264_profile; @@ -887,7 +887,7 @@ err_set_param: static void vb2ops_venc_stop_streaming(struct vb2_queue *q) { struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q); - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; int ret; mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type); @@ -895,13 +895,11 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q) if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) { dst_buf->planes[0].bytesused = 0; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); } } else { while ((src_buf = 
v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); } if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && @@ -937,8 +935,7 @@ static int mtk_venc_encode_header(void *priv) { struct mtk_vcodec_ctx *ctx = priv; int ret; - struct vb2_buffer *src_buf, *dst_buf; - struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct mtk_vcodec_mem bs_buf; struct venc_done_result enc_result; @@ -948,14 +945,14 @@ static int mtk_venc_encode_header(void *priv) return -EINVAL; } - bs_buf.va = vb2_plane_vaddr(dst_buf, 0); - bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); + bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); bs_buf.size = (size_t)dst_buf->planes[0].length; mtk_v4l2_debug(1, "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu", ctx->id, - dst_buf->index, bs_buf.va, + dst_buf->vb2_buf.index, bs_buf.va, (u64)bs_buf.dma_addr, bs_buf.size); @@ -964,26 +961,23 @@ static int mtk_venc_encode_header(void *priv) NULL, &bs_buf, &enc_result); if (ret) { - dst_buf->planes[0].bytesused = 0; + dst_buf->vb2_buf.planes[0].bytesused = 0; ctx->state = MTK_STATE_ABORT; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); mtk_v4l2_err("venc_if_encode failed=%d", ret); return -EINVAL; } src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); if (src_buf) { - src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf); - dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf); - dst_buf->timestamp = src_buf->timestamp; - dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode; + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; + dst_buf->timecode = src_buf->timecode; } else { mtk_v4l2_err("No timestamp for the header buffer."); } ctx->state = MTK_STATE_HEADER; dst_buf->planes[0].bytesused = enc_result.bs_size; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); return 0; } @@ -991,9 +985,7 @@ static int mtk_venc_encode_header(void *priv) static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx) { struct venc_enc_param enc_prm; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx); - struct vb2_v4l2_buffer *vb2_v4l2 = - container_of(vb, struct vb2_v4l2_buffer, vb2_buf); + struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx); struct mtk_video_enc_buf *mtk_buf = container_of(vb2_v4l2, struct mtk_video_enc_buf, vb); @@ -1067,12 +1059,11 @@ static void mtk_venc_worker(struct work_struct *work) { struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx, encode_work); - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct venc_frm_buf frm_buf; struct mtk_vcodec_mem bs_buf; struct venc_done_result enc_result; int ret, i; - struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2; /* check dst_buf, dst_buf may be removed in device_run * to stored encdoe header so we need check dst_buf and @@ -1086,15 +1077,15 @@ static void mtk_venc_worker(struct work_struct *work) src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); memset(&frm_buf, 0, sizeof(frm_buf)); - for (i = 0; i < src_buf->num_planes ; i++) { + for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) { frm_buf.fb_addr[i].dma_addr = - vb2_dma_contig_plane_dma_addr(src_buf, i); + vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, i); frm_buf.fb_addr[i].size = - 
(size_t)src_buf->planes[i].length; + (size_t)src_buf->vb2_buf.planes[i].length; } - bs_buf.va = vb2_plane_vaddr(dst_buf, 0); - bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); - bs_buf.size = (size_t)dst_buf->planes[0].length; + bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); + bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); + bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length; mtk_v4l2_debug(2, "Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu", @@ -1108,28 +1099,21 @@ static void mtk_venc_worker(struct work_struct *work) ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME, &frm_buf, &bs_buf, &enc_result); - src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf); - dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf); - - dst_buf->timestamp = src_buf->timestamp; - dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode; + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; + dst_buf->timecode = src_buf->timecode; if (enc_result.is_key_frm) - dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME; + dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME; if (ret) { - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); dst_buf->planes[0].bytesused = 0; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); mtk_v4l2_err("venc_if_encode failed=%d", ret); } else { - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); dst_buf->planes[0].bytesused = enc_result.bs_size; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); mtk_v4l2_debug(2, "venc_if_encode bs size=%d", enc_result.bs_size); } @@ -1137,7 +1121,7 @@ static void mtk_venc_worker(struct work_struct *work) v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx); mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>", - src_buf->index, dst_buf->index, ret, + src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret, enc_result.bs_size); } diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c index 7c025045ea90..39375b8ea27c 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c @@ -27,9 +27,11 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) { struct device_node *node; struct platform_device *pdev; - struct device *dev; struct mtk_vcodec_pm *pm; - int ret = 0; + struct mtk_vcodec_clk *enc_clk; + struct mtk_vcodec_clk_info *clk_info; + int ret = 0, i = 0; + struct device *dev; pdev = mtkdev->plat_dev; pm = &mtkdev->pm; @@ -37,6 +39,7 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) pm->mtkdev = mtkdev; pm->dev = &pdev->dev; dev = &pdev->dev; + enc_clk = &pm->venc_clk; node = of_parse_phandle(dev->of_node, "mediatek,larb", 0); if (!node) { @@ -68,28 +71,34 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) pdev = mtkdev->plat_dev; pm->dev = &pdev->dev; - pm->vencpll_d2 = devm_clk_get(&pdev->dev, "venc_sel_src"); - if (IS_ERR(pm->vencpll_d2)) { - mtk_v4l2_err("devm_clk_get vencpll_d2 fail"); - ret = PTR_ERR(pm->vencpll_d2); - } - - pm->venc_sel = devm_clk_get(&pdev->dev, "venc_sel"); - if (IS_ERR(pm->venc_sel)) { - mtk_v4l2_err("devm_clk_get venc_sel fail"); - ret = PTR_ERR(pm->venc_sel); + enc_clk->clk_num = of_property_count_strings(pdev->dev.of_node, + 
"clock-names"); + if (enc_clk->clk_num > 0) { + enc_clk->clk_info = devm_kcalloc(&pdev->dev, + enc_clk->clk_num, sizeof(*clk_info), + GFP_KERNEL); + if (!enc_clk->clk_info) + return -ENOMEM; + } else { + mtk_v4l2_err("Failed to get venc clock count"); + return -EINVAL; } - pm->univpll1_d2 = devm_clk_get(&pdev->dev, "venc_lt_sel_src"); - if (IS_ERR(pm->univpll1_d2)) { - mtk_v4l2_err("devm_clk_get univpll1_d2 fail"); - ret = PTR_ERR(pm->univpll1_d2); - } - - pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel"); - if (IS_ERR(pm->venc_lt_sel)) { - mtk_v4l2_err("devm_clk_get venc_lt_sel fail"); - ret = PTR_ERR(pm->venc_lt_sel); + for (i = 0; i < enc_clk->clk_num; i++) { + clk_info = &enc_clk->clk_info[i]; + ret = of_property_read_string_index(pdev->dev.of_node, + "clock-names", i, &clk_info->clk_name); + if (ret) { + mtk_v4l2_err("venc failed to get clk name %d", i); + return ret; + } + clk_info->vcodec_clk = devm_clk_get(&pdev->dev, + clk_info->clk_name); + if (IS_ERR(clk_info->vcodec_clk)) { + mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i, + clk_info->clk_name); + return PTR_ERR(clk_info->vcodec_clk); + } } return ret; @@ -102,38 +111,45 @@ void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *mtkdev) void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm) { - int ret; - - ret = clk_prepare_enable(pm->venc_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable fail %d", ret); - - ret = clk_set_parent(pm->venc_sel, pm->vencpll_d2); - if (ret) - mtk_v4l2_err("clk_set_parent fail %d", ret); - - ret = clk_prepare_enable(pm->venc_lt_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable fail %d", ret); - - ret = clk_set_parent(pm->venc_lt_sel, pm->univpll1_d2); - if (ret) - mtk_v4l2_err("clk_set_parent fail %d", ret); + struct mtk_vcodec_clk *enc_clk = &pm->venc_clk; + int ret, i = 0; + + for (i = 0; i < enc_clk->clk_num; i++) { + ret = clk_prepare_enable(enc_clk->clk_info[i].vcodec_clk); + if (ret) { + mtk_v4l2_err("venc clk_prepare_enable %d %s fail %d", i, + enc_clk->clk_info[i].clk_name, ret); + goto clkerr; + } + } ret = mtk_smi_larb_get(pm->larbvenc); - if (ret) + if (ret) { mtk_v4l2_err("mtk_smi_larb_get larb3 fail %d", ret); - + goto larbvencerr; + } ret = mtk_smi_larb_get(pm->larbvenclt); - if (ret) + if (ret) { mtk_v4l2_err("mtk_smi_larb_get larb4 fail %d", ret); + goto larbvenclterr; + } + return; +larbvenclterr: + mtk_smi_larb_put(pm->larbvenc); +larbvencerr: +clkerr: + for (i -= 1; i >= 0; i--) + clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk); } void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm) { + struct mtk_vcodec_clk *enc_clk = &pm->venc_clk; + int i = 0; + mtk_smi_larb_put(pm->larbvenc); mtk_smi_larb_put(pm->larbvenclt); - clk_disable_unprepare(pm->venc_lt_sel); - clk_disable_unprepare(pm->venc_sel); + for (i = enc_clk->clk_num - 1; i >= 0; i--) + clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk); } diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c index aa3ce41898bc..02c960c63ac0 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c @@ -55,7 +55,7 @@ struct h264_fb { /** * struct h264_ring_fb_list - ring frame buffer list - * @fb_list : frame buffer arrary + * @fb_list : frame buffer array * @read_idx : read index * @write_idx : write index * @count : buffer count in list @@ -72,7 +72,7 @@ struct h264_ring_fb_list { /** * struct vdec_h264_dec_info - decode information * @dpb_sz : decoding picture buffer size - * 
@resolution_changed : resoltion change happen + * @resolution_changed : resolution change happen * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer * @reserved : for 8 bytes alignment * @bs_dma : Input bit-stream buffer dma address diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c index 3e84a761db3a..bac3723038de 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c @@ -120,7 +120,7 @@ struct vdec_vp8_hw_reg_base { /** * struct vdec_vp8_vpu_inst - VPU instance for VP8 decode * @wq_hd : Wait queue to wait VPU message ack - * @signaled : 1 - Host has received ack message from VPU, 0 - not recevie + * @signaled : 1 - Host has received ack message from VPU, 0 - not receive * @failure : VPU execution result status 0 - success, others - fail * @inst_addr : VPU decoder instance address */ diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h index ded1154481cd..9a21591f3818 100644 --- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h +++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h @@ -80,7 +80,7 @@ void vdec_if_deinit(struct mtk_vcodec_ctx *ctx); * vdec_if_decode() - trigger decode * @ctx : [in] v4l2 context * @bs : [in] input bitstream - * @fb : [in] frame buffer to store decoded frame, when null menas parse + * @fb : [in] frame buffer to store decoded frame, when null means parse * header only * @res_chg : [out] resolution change happens if current bs have different * picture width/height diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h index cd37bb2a610f..79d8eac7f5e2 100644 --- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h +++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h @@ -62,7 +62,7 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len); /** * vpu_dec_end - end decoding, basically the function will be invoked once * when HW decoding done interrupt received successfully. The - * decoder in VPU will continute to do referene frame management + * decoder in VPU will continue to do reference frame management * and check if there is a new decoded frame available to display. 
* * @vpu : instance for vdec_vpu_inst diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c index 27b078cf98e3..f60f499c596b 100644 --- a/drivers/media/platform/mx2_emmaprp.c +++ b/drivers/media/platform/mx2_emmaprp.c @@ -274,7 +274,7 @@ static void emmaprp_device_run(void *priv) { struct emmaprp_ctx *ctx = priv; struct emmaprp_q_data *s_q_data, *d_q_data; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct emmaprp_dev *pcdev = ctx->dev; unsigned int s_width, s_height; unsigned int d_width, d_height; @@ -294,8 +294,8 @@ static void emmaprp_device_run(void *priv) d_height = d_q_data->height; d_size = d_width * d_height; - p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0); - p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); + p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); if (!p_in || !p_out) { v4l2_err(&pcdev->v4l2_dev, "Acquiring kernel pointers to buffers failed\n"); diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index f447ae3bb465..37f0d7146dfa 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -513,7 +513,7 @@ static int omapvid_apply_changes(struct omap_vout_device *vout) } static int omapvid_handle_interlace_display(struct omap_vout_device *vout, - unsigned int irqstatus, struct timeval timevalue) + unsigned int irqstatus, u64 ts) { u32 fid; @@ -537,7 +537,7 @@ static int omapvid_handle_interlace_display(struct omap_vout_device *vout, if (vout->cur_frm == vout->next_frm) goto err; - vout->cur_frm->ts = timevalue; + vout->cur_frm->ts = ts; vout->cur_frm->state = VIDEOBUF_DONE; wake_up_interruptible(&vout->cur_frm->done); vout->cur_frm = vout->next_frm; @@ -557,7 +557,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus) int ret, fid, mgr_id; u32 addr, irq; struct omap_overlay *ovl; - struct timeval timevalue; + u64 ts; struct omapvideo_info *ovid; struct omap_dss_device *cur_display; struct omap_vout_device *vout = (struct omap_vout_device *)arg; @@ -577,7 +577,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus) return; spin_lock(&vout->vbq_lock); - v4l2_get_timestamp(&timevalue); + ts = ktime_get_ns(); switch (cur_display->type) { case OMAP_DISPLAY_TYPE_DSI: @@ -595,7 +595,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus) break; case OMAP_DISPLAY_TYPE_VENC: fid = omapvid_handle_interlace_display(vout, irqstatus, - timevalue); + ts); if (!fid) goto vout_isr_err; break; @@ -608,7 +608,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus) } if (!vout->first_int && (vout->cur_frm != vout->next_frm)) { - vout->cur_frm->ts = timevalue; + vout->cur_frm->ts = ts; vout->cur_frm->state = VIDEOBUF_DONE; wake_up_interruptible(&vout->cur_frm->done); vout->cur_frm = vout->next_frm; @@ -1129,7 +1129,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh, } timing = &dssdev->panel.timings; - /* We dont support RGB24-packed mode if vrfb rotation + /* We don't support RGB24-packed mode if vrfb rotation * is enabled*/ if ((is_rotation_enabled(vout)) && f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) { @@ -1147,7 +1147,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh, vout->fbuf.fmt.width = timing->x_res; } - /* change to samller size is OK */ + /* change to smaller size is OK */ bpp = omap_vout_try_format(&f->fmt.pix); f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * 
bpp; diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h index 56b630b1c8b4..c740393c8509 100644 --- a/drivers/media/platform/omap/omap_voutdef.h +++ b/drivers/media/platform/omap/omap_voutdef.h @@ -37,7 +37,7 @@ #define VID_MAX_WIDTH 1280 /* Largest width */ #define VID_MAX_HEIGHT 720 /* Largest height */ -/* Mimimum requirement is 2x2 for DSS */ +/* Minimum requirement is 2x2 for DSS */ #define VID_MIN_WIDTH 2 #define VID_MIN_HEIGHT 2 @@ -135,7 +135,7 @@ struct omap_vout_device { enum omap_color_mode dss_mode; /* we don't allow to request new buffer when old buffers are - * still mmaped + * still mmapped */ int mmap_count; diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 13f2828d880d..bd57174d81a7 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1517,7 +1517,7 @@ void omap3isp_print_status(struct isp_device *isp) * * To solve this problem power management support is split into prepare/complete * and suspend/resume operations. The pipelines are stopped in prepare() and the - * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in + * ISP clocks get disabled in suspend(). Similarly, the clocks are re-enabled in * resume(), and the the pipelines are restarted in complete(). * * TODO: PM dependencies between the ISP and sensors are not modelled explicitly diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c index 14a1c24037c4..261ad1175f98 100644 --- a/drivers/media/platform/omap3isp/ispccdc.c +++ b/drivers/media/platform/omap3isp/ispccdc.c @@ -1594,7 +1594,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc) return 0; /* We're in continuous mode, and memory writes were disabled due to a - * buffer underrun. Reenable them now that we have a buffer. The buffer + * buffer underrun. Re-enable them now that we have a buffer. The buffer * address has been set in ccdc_video_queue. */ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) { @@ -1712,7 +1712,7 @@ static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc) * data to memory the CCDC and LSC are stopped immediately but * without change the CCDC stopping state machine. The CCDC * stopping state machine should be used only when user request - * for stopping is received (SINGLESHOT is an exeption). + * for stopping is received (SINGLESHOT is an exception). */ switch (ccdc->state) { case ISP_PIPELINE_STREAM_SINGLESHOT: diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c index 9c180f607bcb..da66ea65be5d 100644 --- a/drivers/media/platform/omap3isp/ispcsi2.c +++ b/drivers/media/platform/omap3isp/ispcsi2.c @@ -710,7 +710,7 @@ static void csi2_isr_ctx(struct isp_csi2_device *csi2, /* Skip interrupts until we reach the frame skip count. The CSI2 will be * automatically disabled, as the frame skip count has been programmed - * in the CSI2_CTx_CTRL1::COUNT field, so reenable it. + * in the CSI2_CTx_CTRL1::COUNT field, so re-enable it. 
* * It would have been nice to rely on the FRAME_NUMBER interrupt instead * but it turned out that the interrupt is only generated when the CSI2 diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index 5f930560eb30..4fe228752a43 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -1631,7 +1631,7 @@ static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev) pcdev->channels = 1; - /* Make choises, based on platform preferences */ + /* Make choices, based on platform preferences */ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) { if (pcdev->platform_flags & PXA_CAMERA_HSP) @@ -2394,15 +2394,17 @@ static int pxa_camera_probe(struct platform_device *pdev) pcdev->res = res; pcdev->pdata = pdev->dev.platform_data; - if (pdev->dev.of_node && !pcdev->pdata) { - err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd); - } else { + if (pcdev->pdata) { pcdev->platform_flags = pcdev->pdata->flags; pcdev->mclk = pcdev->pdata->mclk_10khz * 10000; pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C; pcdev->asd.match.i2c.adapter_id = pcdev->pdata->sensor_i2c_adapter_id; pcdev->asd.match.i2c.address = pcdev->pdata->sensor_i2c_address; + } else if (pdev->dev.of_node) { + err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd); + } else { + return -ENODEV; } if (err < 0) return err; diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index cb411eb85ee4..739366744e0f 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -455,7 +455,7 @@ static const struct venus_resources msm8996_res = { .reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset), .clks = {"core", "iface", "bus", "mbus" }, .clks_num = 4, - .max_load = 2563200, + .max_load = 3110400, /* 4096x2160@90 */ .hfi_version = HFI_VERSION_3XX, .vmem_id = VIDC_RESOURCE_NONE, .vmem_size = 0, @@ -465,10 +465,12 @@ static const struct venus_resources msm8996_res = { }; static const struct freq_tbl sdm845_freq_table[] = { - { 1944000, 380000000 }, /* 4k UHD @ 60 */ - { 972000, 320000000 }, /* 4k UHD @ 30 */ - { 489600, 200000000 }, /* 1080p @ 60 */ - { 244800, 100000000 }, /* 1080p @ 30 */ + { 3110400, 533000000 }, /* 4096x2160@90 */ + { 2073600, 444000000 }, /* 4096x2160@60 */ + { 1944000, 404000000 }, /* 3840x2160@60 */ + { 972000, 330000000 }, /* 3840x2160@30 */ + { 489600, 200000000 }, /* 1920x1080@60 */ + { 244800, 100000000 }, /* 1920x1080@30 */ }; static const struct venus_resources sdm845_res = { diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index 6382cea29185..7a3feb5cee00 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -134,6 +134,7 @@ struct venus_core { struct video_firmware { struct device *dev; struct iommu_domain *iommu_domain; + size_t mapped_mem_size; } fw; struct mutex lock; struct list_head instances; @@ -218,7 +219,7 @@ struct venus_buffer { #define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb) /** - * struct venus_inst - holds per instance paramerters + * struct venus_inst - holds per instance parameters * * @list: used for attach an instance to the core * @lock: instance lock diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index c29acfd70c1b..6cfa8021721e 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ 
b/drivers/media/platform/qcom/venus/firmware.c @@ -35,14 +35,15 @@ static void venus_reset_cpu(struct venus_core *core) { + u32 fw_size = core->fw.mapped_mem_size; void __iomem *base = core->base; writel(0, base + WRAPPER_FW_START_ADDR); - writel(VENUS_FW_MEM_SIZE, base + WRAPPER_FW_END_ADDR); + writel(fw_size, base + WRAPPER_FW_END_ADDR); writel(0, base + WRAPPER_CPA_START_ADDR); - writel(VENUS_FW_MEM_SIZE, base + WRAPPER_CPA_END_ADDR); - writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_START_ADDR); - writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_END_ADDR); + writel(fw_size, base + WRAPPER_CPA_END_ADDR); + writel(fw_size, base + WRAPPER_NONPIX_START_ADDR); + writel(fw_size, base + WRAPPER_NONPIX_END_ADDR); writel(0x0, base + WRAPPER_CPU_CGC_DIS); writel(0x0, base + WRAPPER_CPU_CLOCK_CONFIG); @@ -74,6 +75,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname, void *mem_va; int ret; + *mem_phys = 0; + *mem_size = 0; + dev = core->dev; node = of_parse_phandle(dev->of_node, "memory-region", 0); if (!node) { @@ -85,28 +89,30 @@ static int venus_load_fw(struct venus_core *core, const char *fwname, if (ret) return ret; + ret = request_firmware(&mdt, fwname, dev); + if (ret < 0) + return ret; + + fw_size = qcom_mdt_get_size(mdt); + if (fw_size < 0) { + ret = fw_size; + goto err_release_fw; + } + *mem_phys = r.start; *mem_size = resource_size(&r); - if (*mem_size < VENUS_FW_MEM_SIZE) - return -EINVAL; + if (*mem_size < fw_size || fw_size > VENUS_FW_MEM_SIZE) { + ret = -EINVAL; + goto err_release_fw; + } mem_va = memremap(r.start, *mem_size, MEMREMAP_WC); if (!mem_va) { dev_err(dev, "unable to map memory region: %pa+%zx\n", &r.start, *mem_size); - return -ENOMEM; - } - - ret = request_firmware(&mdt, fwname, dev); - if (ret < 0) - goto err_unmap; - - fw_size = qcom_mdt_get_size(mdt); - if (fw_size < 0) { - ret = fw_size; - release_firmware(mdt); - goto err_unmap; + ret = -ENOMEM; + goto err_release_fw; } if (core->use_tz) @@ -116,10 +122,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname, ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID, mem_va, *mem_phys, *mem_size, NULL); - release_firmware(mdt); - -err_unmap: memunmap(mem_va); +err_release_fw: + release_firmware(mdt); return ret; } @@ -135,6 +140,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys, return -EPROBE_DEFER; iommu = core->fw.iommu_domain; + core->fw.mapped_mem_size = mem_size; ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size, IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV); @@ -150,6 +156,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys, static int venus_shutdown_no_tz(struct venus_core *core) { + const size_t mapped = core->fw.mapped_mem_size; struct iommu_domain *iommu; size_t unmapped; u32 reg; @@ -166,8 +173,8 @@ static int venus_shutdown_no_tz(struct venus_core *core) iommu = core->fw.iommu_domain; - unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, VENUS_FW_MEM_SIZE); - if (unmapped != VENUS_FW_MEM_SIZE) + unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped); + if (unmapped != mapped) dev_err(dev, "failed to unmap firmware\n"); return 0; diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index e436385bc5ab..5cad601d4c57 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -439,9 +439,6 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf) fdata.flags = 0; fdata.clnt_data = 
vbuf->vb2_buf.index; - if (!fdata.timestamp) - fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID; - if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { fdata.buffer_type = HFI_BUFFER_INPUT; fdata.filled_len = vb2_get_plane_payload(vb, 0); diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c index f0719ce24b97..594d80434004 100644 --- a/drivers/media/platform/rcar-vin/rcar-core.c +++ b/drivers/media/platform/rcar-vin/rcar-core.c @@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags, !is_media_entity_v4l2_video_device(link->sink->entity)) return 0; - /* If any entity is in use don't allow link changes. */ + /* + * Don't allow link changes if any entity in the graph is + * streaming, modifying the CHSEL register fields can disrupt + * running streams. + */ media_device_for_each_entity(entity, &group->mdev) - if (entity->use_count) + if (entity->stream_count) return -EBUSY; mutex_lock(&group->lock); @@ -542,9 +546,7 @@ static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier, vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name); - mutex_lock(&vin->lock); rvin_parallel_subdevice_detach(vin); - mutex_unlock(&vin->lock); } static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier, @@ -554,9 +556,7 @@ static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier, struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); int ret; - mutex_lock(&vin->lock); ret = rvin_parallel_subdevice_attach(vin, subdev); - mutex_unlock(&vin->lock); if (ret) return ret; @@ -664,7 +664,6 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier) } /* Create all media device links between VINs and CSI-2's. */ - mutex_lock(&vin->group->lock); for (route = vin->info->routes; route->mask; route++) { struct media_pad *source_pad, *sink_pad; struct media_entity *source, *sink; @@ -700,7 +699,6 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier) break; } } - mutex_unlock(&vin->group->lock); return ret; } @@ -716,8 +714,6 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier, if (vin->group->vin[i]) rvin_v4l2_unregister(vin->group->vin[i]); - mutex_lock(&vin->group->lock); - for (i = 0; i < RVIN_CSI_MAX; i++) { if (vin->group->csi[i].fwnode != asd->match.fwnode) continue; @@ -725,8 +721,6 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier, vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i); break; } - - mutex_unlock(&vin->group->lock); } static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier, @@ -736,8 +730,6 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier, struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); unsigned int i; - mutex_lock(&vin->group->lock); - for (i = 0; i < RVIN_CSI_MAX; i++) { if (vin->group->csi[i].fwnode != asd->match.fwnode) continue; @@ -746,8 +738,6 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier, break; } - mutex_unlock(&vin->group->lock); - return 0; } @@ -1146,6 +1136,10 @@ static const struct rvin_info rcar_info_r8a77995 = { static const struct of_device_id rvin_of_id_table[] = { { + .compatible = "renesas,vin-r8a774c0", + .data = &rcar_info_r8a77990, + }, + { .compatible = "renesas,vin-r8a7778", .data = &rcar_info_m1, }, diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c index 6d356f5a9456..f64528d2be3c 100644 --- 
a/drivers/media/platform/rcar-vin/rcar-csi2.c +++ b/drivers/media/platform/rcar-vin/rcar-csi2.c @@ -152,37 +152,37 @@ static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = { }; static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = { - { .mbps = 89, .reg = 0x00 }, - { .mbps = 99, .reg = 0x20 }, - { .mbps = 109, .reg = 0x40 }, - { .mbps = 129, .reg = 0x02 }, - { .mbps = 139, .reg = 0x22 }, - { .mbps = 149, .reg = 0x42 }, - { .mbps = 169, .reg = 0x04 }, - { .mbps = 179, .reg = 0x24 }, - { .mbps = 199, .reg = 0x44 }, - { .mbps = 219, .reg = 0x06 }, - { .mbps = 239, .reg = 0x26 }, - { .mbps = 249, .reg = 0x46 }, - { .mbps = 269, .reg = 0x08 }, - { .mbps = 299, .reg = 0x28 }, - { .mbps = 329, .reg = 0x0a }, - { .mbps = 359, .reg = 0x2a }, - { .mbps = 399, .reg = 0x4a }, - { .mbps = 449, .reg = 0x0c }, - { .mbps = 499, .reg = 0x2c }, - { .mbps = 549, .reg = 0x0e }, - { .mbps = 599, .reg = 0x2e }, - { .mbps = 649, .reg = 0x10 }, - { .mbps = 699, .reg = 0x30 }, - { .mbps = 749, .reg = 0x12 }, - { .mbps = 799, .reg = 0x32 }, - { .mbps = 849, .reg = 0x52 }, - { .mbps = 899, .reg = 0x72 }, - { .mbps = 949, .reg = 0x14 }, - { .mbps = 999, .reg = 0x34 }, - { .mbps = 1049, .reg = 0x54 }, - { .mbps = 1099, .reg = 0x74 }, + { .mbps = 80, .reg = 0x00 }, + { .mbps = 90, .reg = 0x20 }, + { .mbps = 100, .reg = 0x40 }, + { .mbps = 110, .reg = 0x02 }, + { .mbps = 130, .reg = 0x22 }, + { .mbps = 140, .reg = 0x42 }, + { .mbps = 150, .reg = 0x04 }, + { .mbps = 170, .reg = 0x24 }, + { .mbps = 180, .reg = 0x44 }, + { .mbps = 200, .reg = 0x06 }, + { .mbps = 220, .reg = 0x26 }, + { .mbps = 240, .reg = 0x46 }, + { .mbps = 250, .reg = 0x08 }, + { .mbps = 270, .reg = 0x28 }, + { .mbps = 300, .reg = 0x0a }, + { .mbps = 330, .reg = 0x2a }, + { .mbps = 360, .reg = 0x4a }, + { .mbps = 400, .reg = 0x0c }, + { .mbps = 450, .reg = 0x2c }, + { .mbps = 500, .reg = 0x0e }, + { .mbps = 550, .reg = 0x2e }, + { .mbps = 600, .reg = 0x10 }, + { .mbps = 650, .reg = 0x30 }, + { .mbps = 700, .reg = 0x12 }, + { .mbps = 750, .reg = 0x32 }, + { .mbps = 800, .reg = 0x52 }, + { .mbps = 850, .reg = 0x72 }, + { .mbps = 900, .reg = 0x14 }, + { .mbps = 950, .reg = 0x34 }, + { .mbps = 1000, .reg = 0x54 }, + { .mbps = 1050, .reg = 0x74 }, { .mbps = 1125, .reg = 0x16 }, { /* sentinel */ }, }; @@ -986,6 +986,10 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = { static const struct of_device_id rcar_csi2_of_table[] = { { + .compatible = "renesas,r8a774c0-csi2", + .data = &rcar_csi2_info_r8a77990, + }, + { .compatible = "renesas,r8a7795-csi2", .data = &rcar_csi2_info_r8a7795, }, diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c index 92323310f735..2207a31d355e 100644 --- a/drivers/media/platform/rcar-vin/rcar-dma.c +++ b/drivers/media/platform/rcar-vin/rcar-dma.c @@ -681,7 +681,7 @@ static int rvin_setup(struct rvin_dev *vin) break; } - /* Enable VSYNC Field Toogle mode after one VSYNC input */ + /* Enable VSYNC Field Toggle mode after one VSYNC input */ if (vin->info->model == RCAR_GEN3) dmr2 = VNDMR2_FTEV; else @@ -1341,5 +1341,5 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel) pm_runtime_put(vin->dev); - return ret; + return 0; } diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c index 7a2851790b91..7cbdcbf9b090 100644 --- a/drivers/media/platform/rcar-vin/rcar-v4l2.c +++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c @@ -665,7 +665,7 @@ static void rvin_mc_try_format(struct rvin_dev *vin, * The V4L2 specification 
clearly documents the colorspace fields * as being set by drivers for capture devices. Using the values * supplied by userspace thus wouldn't comply with the API. Until - * the API is updated force fixed vaules. + * the API is updated force fixed values. */ pix->colorspace = RVIN_DEFAULT_COLORSPACE; pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace); @@ -964,7 +964,7 @@ void rvin_v4l2_unregister(struct rvin_dev *vin) v4l2_info(&vin->v4l2_dev, "Removing %s\n", video_device_node_name(&vin->vdev)); - /* Checks internaly if vdev have been init or not */ + /* Checks internally if vdev have been init or not */ video_unregister_device(&vin->vdev); } diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c index 96d1b1b3fe8e..843e50d17a72 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.c +++ b/drivers/media/platform/rockchip/rga/rga-hw.c @@ -256,7 +256,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) } /* - * Cacluate the up/down scaling mode/factor. + * Calculate the up/down scaling mode/factor. * * RGA used to scale the picture first, and then rotate second, * so we need to swap the w/h when rotate degree is 90/270. @@ -304,7 +304,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) } /* - * Cacluate the framebuffer virtual strides and active size, + * Calculate the framebuffer virtual strides and active size, * note that the step of vir_stride / vir_width is 4 byte words */ src_vir_info.data.vir_stride = ctx->in.stride >> 2; @@ -318,7 +318,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) dst_act_info.data.act_width = dst_w - 1; /* - * Cacluate the source framebuffer base address with offset pixel. + * Calculate the source framebuffer base address with offset pixel. 
*/ src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y, src_w, src_h); diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c index 5c653287185f..b096227a9722 100644 --- a/drivers/media/platform/rockchip/rga/rga.c +++ b/drivers/media/platform/rockchip/rga/rga.c @@ -43,7 +43,7 @@ static void device_run(void *prv) { struct rga_ctx *ctx = prv; struct rockchip_rga *rga = ctx->rga; - struct vb2_buffer *src, *dst; + struct vb2_v4l2_buffer *src, *dst; unsigned long flags; spin_lock_irqsave(&rga->ctrl_lock, flags); @@ -53,8 +53,8 @@ static void device_run(void *prv) src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - rga_buf_map(src); - rga_buf_map(dst); + rga_buf_map(&src->vb2_buf); + rga_buf_map(&dst->vb2_buf); rga_hw_start(rga); diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h index 1f5c8c94ce89..be5e7357dffc 100644 --- a/drivers/media/platform/s3c-camif/camif-core.h +++ b/drivers/media/platform/s3c-camif/camif-core.h @@ -260,7 +260,7 @@ struct camif_vp { * @clock: clocks required for the CAMIF operation * @lock: mutex protecting this data structure * @slock: spinlock protecting CAMIF registers - * @io_base: start address of the mmaped CAMIF registers + * @io_base: start address of the mmapped CAMIF registers */ struct camif_dev { struct media_device media_dev; diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 57ab1d1085d1..971c47165010 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -513,7 +513,7 @@ static void device_run(void *prv) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; - struct vb2_buffer *src, *dst; + struct vb2_v4l2_buffer *src, *dst; unsigned long flags; u32 cmd = 0; @@ -528,10 +528,10 @@ static void device_run(void *prv) spin_lock_irqsave(&dev->ctrl_lock, flags); g2d_set_src_size(dev, &ctx->in); - g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0)); + g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0)); g2d_set_dst_size(dev, &ctx->out); - g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0)); + g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0)); g2d_set_rop4(dev, ctx->rop); g2d_set_flip(dev, ctx->flip); diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index 3f9000b70385..8cc730eccb6c 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -3,7 +3,7 @@ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. 
* http://www.samsung.com * - * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com> * Author: Jacek Anaszewski <j.anaszewski@samsung.com> * * This program is free software; you can redistribute it and/or modify @@ -793,14 +793,14 @@ static void skip(struct s5p_jpeg_buffer *buf, long len); static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, x, components; jpeg_buffer.size = 2; /* Ls */ jpeg_buffer.data = - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2; + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2; jpeg_buffer.curr = 0; word = 0; @@ -830,14 +830,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, i, n, j; for (j = 0; j < ctx->out_q.dht.n; ++j) { jpeg_buffer.size = ctx->out_q.dht.len[j]; - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) + + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.dht.marker[j]; jpeg_buffer.curr = 0; @@ -889,13 +889,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; int c, x, components; jpeg_buffer.size = ctx->out_q.sof_len; jpeg_buffer.data = - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof; + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof; jpeg_buffer.curr = 0; skip(&jpeg_buffer, 5); /* P, Y, X */ @@ -920,14 +920,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, i, j; for (j = 0; j < ctx->out_q.dqt.n; ++j) { jpeg_buffer.size = ctx->out_q.dqt.len[j]; - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) + + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.dqt.marker[j]; jpeg_buffer.curr = 0; @@ -1293,13 +1293,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv, return 0; } -static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n, +static int enum_fmt(struct s5p_jpeg_ctx *ctx, + struct s5p_jpeg_fmt *sjpeg_formats, int n, struct v4l2_fmtdesc *f, u32 type) { int i, num = 0; + unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag; for (i = 0; i < n; ++i) { - if (sjpeg_formats[i].flags & type) { + if (sjpeg_formats[i].flags & type && + sjpeg_formats[i].flags & fmt_ver_flag) { /* index-th format of type type found ? 
*/ if (num == f->index) break; @@ -1326,11 +1329,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv, struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, SJPEG_FMT_FLAG_ENC_CAPTURE); - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, - SJPEG_FMT_FLAG_DEC_CAPTURE); + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, + SJPEG_FMT_FLAG_DEC_CAPTURE); } static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv, @@ -1339,11 +1342,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv, struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, SJPEG_FMT_FLAG_ENC_OUTPUT); - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, - SJPEG_FMT_FLAG_DEC_OUTPUT); + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, + SJPEG_FMT_FLAG_DEC_OUTPUT); } static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx, @@ -2002,7 +2005,7 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx) v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops, V4L2_CID_JPEG_RESTART_INTERVAL, - 0, 3, 0xffff, 0); + 0, 0xffff, 1, 0); if (ctx->jpeg->variant->version == SJPEG_S5P) mask = ~0x06; /* 422, 420 */ } @@ -2072,15 +2075,15 @@ static void s5p_jpeg_device_run(void *priv) { struct s5p_jpeg_ctx *ctx = priv; struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; unsigned long src_addr, dst_addr, flags; spin_lock_irqsave(&ctx->jpeg->slock, flags); src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); - dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); + dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); s5p_jpeg_reset(jpeg->regs); s5p_jpeg_poweron(jpeg->regs); @@ -2153,7 +2156,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; struct s5p_jpeg_fmt *fmt; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; struct s5p_jpeg_addr jpeg_addr = {}; u32 pix_size, padding_bytes = 0; @@ -2172,7 +2175,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); } - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (fmt->colplanes == 2) { jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes; @@ -2190,7 +2193,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; unsigned int jpeg_addr = 0; if (ctx->mode == S5P_JPEG_ENCODE) @@ -2198,7 +2201,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) else vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (jpeg->variant->version == SJPEG_EXYNOS5433 && ctx->mode == S5P_JPEG_DECODE) jpeg_addr += ctx->out_q.sos; @@ -2314,7 +2317,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; 
struct s5p_jpeg_fmt *fmt; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; struct s5p_jpeg_addr jpeg_addr = {}; u32 pix_size; @@ -2328,7 +2331,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) fmt = ctx->cap_q.fmt; } - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (fmt->colplanes == 2) { jpeg_addr.cb = jpeg_addr.y + pix_size; @@ -2346,7 +2349,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; unsigned int jpeg_addr = 0; if (ctx->mode == S5P_JPEG_ENCODE) @@ -2354,7 +2357,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) else vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr); } @@ -3220,7 +3223,7 @@ static struct platform_driver s5p_jpeg_driver = { module_platform_driver(s5p_jpeg_driver); -MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>"); +MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>"); MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>"); MODULE_DESCRIPTION("Samsung JPEG codec driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h index a46465e10351..144c102ff05f 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.h +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h @@ -3,7 +3,7 @@ * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -153,7 +153,7 @@ struct s5p_jpeg_variant { /** * struct jpeg_fmt - driver's internal color format data - * @name: format descritpion + * @name: format description * @fourcc: the fourcc code, 0 if not applicable * @depth: number of bits per pixel * @colplanes: number of color planes (1 for packed formats) @@ -193,7 +193,7 @@ struct s5p_jpeg_marker { * @sos: SOS marker's position relative to the buffer beginning * @dht: DHT markers' positions relative to the buffer beginning * @dqt: DQT markers' positions relative to the buffer beginning - * @sof: SOF0 marker's postition relative to the buffer beginning + * @sof: SOF0 marker's position relative to the buffer beginning * @sof_len: SOF0 marker's payload length (without length field itself) * @components: number of image components * @size: image buffer size in bytes diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c index b5f20e722b63..59c6263a71bf 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c @@ -3,7 +3,7 @@ * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
* http://www.samsung.com * - * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h index f208fa3ed738..bfe746f8f750 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h +++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h @@ -3,7 +3,7 @@ * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h index df790b10140c..574f0e8021e5 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h +++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h @@ -5,7 +5,7 @@ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com> * Author: Jacek Anaszewski <j.anaszewski@samsung.com> * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 8a5ba3bec3af..0a9f59d89185 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1089,7 +1089,6 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev, device_initialize(child); dev_set_name(child, "%s:%s", dev_name(dev), name); child->parent = dev; - child->bus = dev->bus; child->coherent_dma_mask = dev->coherent_dma_mask; child->dma_mask = dev->dma_mask; child->release = s5p_mfc_memdev_release; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index 20442a9b9f7a..24148020baa9 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -268,7 +268,7 @@ struct s5p_mfc_priv_buf { * @enc_ctrl_handler: control framework handler for encoding * @pm: power management control * @variant: MFC hardware variant information - * @num_inst: couter of active MFC instances + * @num_inst: counter of active MFC instances * @irqlock: lock for operations on videobuf2 queues * @condlock: lock for changing/checking if a context is ready to be * processed diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index ee7b15b335e0..9f832ba7bc8c 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -51,7 +51,7 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev) struct firmware *fw_blob; int i, err = -EINVAL; - /* Firmare has to be present as a separate file or compiled + /* Firmware has to be present as a separate file or compiled * into kernel. 
*/ mfc_debug_enter(); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index f4c0e3a8f27d..e111f9c47179 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -602,7 +602,7 @@ static int vidioc_querybuf(struct file *file, void *priv, int i; if (buf->memory != V4L2_MEMORY_MMAP) { - mfc_err("Only mmaped buffers can be used\n"); + mfc_err("Only mmapped buffers can be used\n"); return -EINVAL; } mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 0913881219ff..6144e95f6425 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c @@ -1293,7 +1293,7 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) * First set the output frame buffers */ if (ctx->capture_state != QUEUE_BUFS_MMAPED) { - mfc_err("It seems that not all destination buffers were mmaped\nMFC requires that all destination are mmaped before starting processing\n"); + mfc_err("It seems that not all destination buffers were mmapped\nMFC requires that all destination are mmapped before starting processing\n"); return -EAGAIN; } if (list_empty(&ctx->src_queue)) { diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index 7c629be43205..281699ab7fe1 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -53,7 +53,7 @@ static int s5p_mfc_alloc_dec_temp_buffers_v6(struct s5p_mfc_ctx *ctx) return 0; } -/* Release temproary buffers for decoding */ +/* Release temporary buffers for decoding */ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx) { /* NOP */ @@ -1928,7 +1928,7 @@ static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) if (ctx->capture_state != QUEUE_BUFS_MMAPED) { mfc_err("It seems that not all destination buffers were\n" - "mmaped.MFC requires that all destination are mmaped\n" + "mmapped.MFC requires that all destination are mmapped\n" "before starting processing.\n"); return -EAGAIN; } diff --git a/drivers/media/platform/seco-cec/seco-cec.h b/drivers/media/platform/seco-cec/seco-cec.h index e632c4a2a044..843de8c7dfd4 100644 --- a/drivers/media/platform/seco-cec/seco-cec.h +++ b/drivers/media/platform/seco-cec/seco-cec.h @@ -106,7 +106,7 @@ #define SECOCEC_IR_COMMAND_MASK 0x007F #define SECOCEC_IR_COMMAND_SHL 0 #define SECOCEC_IR_ADDRESS_MASK 0x1F00 -#define SECOCEC_IR_ADDRESS_SHL 7 +#define SECOCEC_IR_ADDRESS_SHL 8 #define SECOCEC_IR_TOGGLE_MASK 0x8000 #define SECOCEC_IR_TOGGLE_SHL 15 diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index 09ae64a0004c..d277cc674349 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c @@ -273,13 +273,13 @@ static void sh_veu_process(struct sh_veu_dev *veu, static void sh_veu_device_run(void *priv) { struct sh_veu_dev *veu = priv; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx); if (src_buf && dst_buf) - sh_veu_process(veu, src_buf, dst_buf); + sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf); } /* ========== video ioctls ========== */ diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig 
deleted file mode 100644 index 669d116b8f09..000000000000 --- a/drivers/media/platform/soc_camera/Kconfig +++ /dev/null @@ -1,26 +0,0 @@ -config SOC_CAMERA - tristate "SoC camera support" - depends on VIDEO_V4L2 && HAS_DMA && I2C - select VIDEOBUF2_CORE - help - SoC Camera is a common API to several cameras, not connecting - over a bus like PCI or USB. For example some i2c camera connected - directly to the data bus of an SoC. - -config SOC_CAMERA_SCALE_CROP - tristate - -config SOC_CAMERA_PLATFORM - tristate "platform camera support" - depends on SOC_CAMERA - help - This is a generic SoC camera platform driver, useful for testing - -config VIDEO_SH_MOBILE_CEU - tristate "SuperH Mobile CEU Interface driver" - depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK - depends on ARCH_SHMOBILE || COMPILE_TEST - select VIDEOBUF2_DMA_CONTIG - select SOC_CAMERA_SCALE_CROP - ---help--- - This is a v4l2 driver for the SuperH Mobile CEU Interface diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile deleted file mode 100644 index 07a451e8b228..000000000000 --- a/drivers/media/platform/soc_camera/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o -obj-$(CONFIG_SOC_CAMERA_SCALE_CROP) += soc_scale_crop.o - -# a platform subdevice driver stub, allowing to support cameras by adding a -# couple of callback functions to the board code -obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o - -# soc-camera host drivers have to be linked after camera drivers -obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c deleted file mode 100644 index 6803f744e307..000000000000 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ /dev/null @@ -1,1810 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * V4L2 Driver for SuperH Mobile CEU interface - * - * Copyright (C) 2008 Magnus Damm - * - * Based on V4L2 Driver for PXA camera host - "pxa_camera.c", - * - * Copyright (C) 2006, Sascha Hauer, Pengutronix - * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/io.h> -#include <linux/completion.h> -#include <linux/delay.h> -#include <linux/dma-mapping.h> -#include <linux/err.h> -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/moduleparam.h> -#include <linux/of.h> -#include <linux/time.h> -#include <linux/slab.h> -#include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/videodev2.h> -#include <linux/pm_runtime.h> -#include <linux/sched.h> - -#include <media/v4l2-async.h> -#include <media/v4l2-common.h> -#include <media/v4l2-dev.h> -#include <media/soc_camera.h> -#include <media/drv-intf/sh_mobile_ceu.h> -#include <media/videobuf2-dma-contig.h> -#include <media/v4l2-mediabus.h> -#include <media/drv-intf/soc_mediabus.h> - -#include "soc_scale_crop.h" - -/* register offsets for sh7722 / sh7723 */ - -#define CAPSR 0x00 /* Capture start register */ -#define CAPCR 0x04 /* Capture control register */ -#define CAMCR 0x08 /* Capture interface control register */ -#define CMCYR 0x0c /* Capture interface cycle register */ -#define CAMOR 0x10 /* Capture interface offset register */ -#define CAPWR 0x14 /* Capture interface width register */ -#define CAIFR 0x18 /* Capture interface input format 
register */ -#define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */ -#define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */ -#define CRCNTR 0x28 /* CEU register control register */ -#define CRCMPR 0x2c /* CEU register forcible control register */ -#define CFLCR 0x30 /* Capture filter control register */ -#define CFSZR 0x34 /* Capture filter size clip register */ -#define CDWDR 0x38 /* Capture destination width register */ -#define CDAYR 0x3c /* Capture data address Y register */ -#define CDACR 0x40 /* Capture data address C register */ -#define CDBYR 0x44 /* Capture data bottom-field address Y register */ -#define CDBCR 0x48 /* Capture data bottom-field address C register */ -#define CBDSR 0x4c /* Capture bundle destination size register */ -#define CFWCR 0x5c /* Firewall operation control register */ -#define CLFCR 0x60 /* Capture low-pass filter control register */ -#define CDOCR 0x64 /* Capture data output control register */ -#define CDDCR 0x68 /* Capture data complexity level register */ -#define CDDAR 0x6c /* Capture data complexity level address register */ -#define CEIER 0x70 /* Capture event interrupt enable register */ -#define CETCR 0x74 /* Capture event flag clear register */ -#define CSTSR 0x7c /* Capture status register */ -#define CSRTR 0x80 /* Capture software reset register */ -#define CDSSR 0x84 /* Capture data size register */ -#define CDAYR2 0x90 /* Capture data address Y register 2 */ -#define CDACR2 0x94 /* Capture data address C register 2 */ -#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */ -#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */ - -#undef DEBUG_GEOMETRY -#ifdef DEBUG_GEOMETRY -#define dev_geo dev_info -#else -#define dev_geo dev_dbg -#endif - -/* per video frame buffer */ -struct sh_mobile_ceu_buffer { - struct vb2_v4l2_buffer vb; /* v4l buffer must be first */ - struct list_head queue; -}; - -struct sh_mobile_ceu_dev { - struct soc_camera_host ici; - - unsigned int irq; - void __iomem *base; - size_t video_limit; - size_t buf_total; - - spinlock_t lock; /* Protects video buffer lists */ - struct list_head capture; - struct vb2_v4l2_buffer *active; - - struct sh_mobile_ceu_info *pdata; - struct completion complete; - - u32 cflcr; - - /* static max sizes either from platform data or default */ - int max_width; - int max_height; - - enum v4l2_field field; - int sequence; - unsigned long flags; - - unsigned int image_mode:1; - unsigned int is_16bit:1; - unsigned int frozen:1; -}; - -struct sh_mobile_ceu_cam { - /* CEU offsets within the camera output, before the CEU scaler */ - unsigned int ceu_left; - unsigned int ceu_top; - /* Client output, as seen by the CEU */ - unsigned int width; - unsigned int height; - /* - * User window from S_SELECTION / G_SELECTION, produced by client cropping and - * scaling, CEU scaling and CEU cropping, mapped back onto the client - * input window - */ - struct v4l2_rect subrect; - /* Camera cropping rectangle */ - struct v4l2_rect rect; - const struct soc_mbus_pixelfmt *extra_fmt; - u32 code; -}; - -static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf) -{ - return container_of(vbuf, struct sh_mobile_ceu_buffer, vb); -} - -static void ceu_write(struct sh_mobile_ceu_dev *priv, - unsigned long reg_offs, u32 data) -{ - iowrite32(data, priv->base + reg_offs); -} - -static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs) -{ - return ioread32(priv->base + reg_offs); -} - -static int 
sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev) -{ - int i, success = 0; - - ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ - - /* wait CSTSR.CPTON bit */ - for (i = 0; i < 1000; i++) { - if (!(ceu_read(pcdev, CSTSR) & 1)) { - success++; - break; - } - udelay(1); - } - - /* wait CAPSR.CPKIL bit */ - for (i = 0; i < 1000; i++) { - if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) { - success++; - break; - } - udelay(1); - } - - if (2 != success) { - dev_warn(pcdev->ici.v4l2_dev.dev, "soft reset time out\n"); - return -EIO; - } - - return 0; -} - -/* - * Videobuf operations - */ - -/* - * .queue_setup() is called to check, whether the driver can accept the - * requested number of buffers and to fill in plane sizes - * for the current frame format if required - */ -static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq, - unsigned int *count, unsigned int *num_planes, - unsigned int sizes[], struct device *alloc_devs[]) -{ - struct soc_camera_device *icd = soc_camera_from_vb2q(vq); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - if (!vq->num_buffers) - pcdev->sequence = 0; - - if (!*count) - *count = 2; - - /* Called from VIDIOC_REQBUFS or in compatibility mode */ - if (!*num_planes) - sizes[0] = icd->sizeimage; - else if (sizes[0] < icd->sizeimage) - return -EINVAL; - - /* If *num_planes != 0, we have already verified *count. */ - if (pcdev->video_limit) { - size_t size = PAGE_ALIGN(sizes[0]) * *count; - - if (size + pcdev->buf_total > pcdev->video_limit) - *count = (pcdev->video_limit - pcdev->buf_total) / - PAGE_ALIGN(sizes[0]); - } - - *num_planes = 1; - - dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]); - - return 0; -} - -#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */ -#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */ -#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */ -#define CEU_CEIER_VBP (1 << 20) /* vbp error */ -#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */ -#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP) - - -/* - * return value doesn't reflex the success/failure to queue the new buffer, - * but rather the status of the previous buffer. - */ -static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) -{ - struct soc_camera_device *icd = pcdev->ici.icd; - dma_addr_t phys_addr_top, phys_addr_bottom; - unsigned long top1, top2; - unsigned long bottom1, bottom2; - u32 status; - bool planar; - int ret = 0; - - /* - * The hardware is _very_ picky about this sequence. Especially - * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge - * several not-so-well documented interrupt sources in CETCR. - */ - ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK); - status = ceu_read(pcdev, CETCR); - ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC); - if (!pcdev->frozen) - ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK); - ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP); - ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW); - - /* - * When a VBP interrupt occurs, a capture end interrupt does not occur - * and the image of that frame is not captured correctly. So, soft reset - * is needed here. 
- */ - if (status & CEU_CEIER_VBP) { - sh_mobile_ceu_soft_reset(pcdev); - ret = -EIO; - } - - if (pcdev->frozen) { - complete(&pcdev->complete); - return ret; - } - - if (!pcdev->active) - return ret; - - if (V4L2_FIELD_INTERLACED_BT == pcdev->field) { - top1 = CDBYR; - top2 = CDBCR; - bottom1 = CDAYR; - bottom2 = CDACR; - } else { - top1 = CDAYR; - top2 = CDACR; - bottom1 = CDBYR; - bottom2 = CDBCR; - } - - phys_addr_top = - vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0); - - switch (icd->current_fmt->host_fmt->fourcc) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - planar = true; - break; - default: - planar = false; - } - - ceu_write(pcdev, top1, phys_addr_top); - if (V4L2_FIELD_NONE != pcdev->field) { - phys_addr_bottom = phys_addr_top + icd->bytesperline; - ceu_write(pcdev, bottom1, phys_addr_bottom); - } - - if (planar) { - phys_addr_top += icd->bytesperline * icd->user_height; - ceu_write(pcdev, top2, phys_addr_top); - if (V4L2_FIELD_NONE != pcdev->field) { - phys_addr_bottom = phys_addr_top + icd->bytesperline; - ceu_write(pcdev, bottom2, phys_addr_bottom); - } - } - - ceu_write(pcdev, CAPSR, 0x1); /* start capture */ - - return ret; -} - -static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); - - /* Added list head initialization on alloc */ - WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb); - - return 0; -} - -static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); - unsigned long size; - - size = icd->sizeimage; - - if (vb2_plane_size(vb, 0) < size) { - dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n", - vb->index, vb2_plane_size(vb, 0), size); - goto error; - } - - vb2_set_plane_payload(vb, 0, size); - - dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__, - vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0)); - -#ifdef DEBUG - /* - * This can be useful if you want to see if we actually fill - * the buffer with something - */ - if (vb2_plane_vaddr(vb, 0)) - memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); -#endif - - spin_lock_irq(&pcdev->lock); - list_add_tail(&buf->queue, &pcdev->capture); - - if (!pcdev->active) { - /* - * Because there were no active buffer at this moment, - * we are not interested in the return value of - * sh_mobile_ceu_capture here. 
- */ - pcdev->active = vbuf; - sh_mobile_ceu_capture(pcdev); - } - spin_unlock_irq(&pcdev->lock); - - return; - -error: - vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); -} - -static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - spin_lock_irq(&pcdev->lock); - - if (pcdev->active == vbuf) { - /* disable capture (release DMA buffer), reset */ - ceu_write(pcdev, CAPSR, 1 << 16); - pcdev->active = NULL; - } - - /* - * Doesn't hurt also if the list is empty, but it hurts, if queuing the - * buffer failed, and .buf_init() hasn't been called - */ - if (buf->queue.next) - list_del_init(&buf->queue); - - pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0)); - dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, - pcdev->buf_total); - - spin_unlock_irq(&pcdev->lock); -} - -static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0)); - dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, - pcdev->buf_total); - - /* This is for locking debugging only */ - INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue); - return 0; -} - -static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q) -{ - struct soc_camera_device *icd = soc_camera_from_vb2q(q); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct list_head *buf_head, *tmp; - struct vb2_v4l2_buffer *vbuf; - - spin_lock_irq(&pcdev->lock); - - pcdev->active = NULL; - - list_for_each_safe(buf_head, tmp, &pcdev->capture) { - vbuf = &list_entry(buf_head, struct sh_mobile_ceu_buffer, - queue)->vb; - vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); - list_del_init(buf_head); - } - - spin_unlock_irq(&pcdev->lock); - - sh_mobile_ceu_soft_reset(pcdev); -} - -static const struct vb2_ops sh_mobile_ceu_videobuf_ops = { - .queue_setup = sh_mobile_ceu_videobuf_setup, - .buf_prepare = sh_mobile_ceu_videobuf_prepare, - .buf_queue = sh_mobile_ceu_videobuf_queue, - .buf_cleanup = sh_mobile_ceu_videobuf_release, - .buf_init = sh_mobile_ceu_videobuf_init, - .wait_prepare = vb2_ops_wait_prepare, - .wait_finish = vb2_ops_wait_finish, - .stop_streaming = sh_mobile_ceu_stop_streaming, -}; - -static irqreturn_t sh_mobile_ceu_irq(int irq, void *data) -{ - struct sh_mobile_ceu_dev *pcdev = data; - struct vb2_v4l2_buffer *vbuf; - int ret; - - spin_lock(&pcdev->lock); - - vbuf = pcdev->active; - if (!vbuf) - /* Stale interrupt from a released buffer */ - goto out; - - list_del_init(&to_ceu_vb(vbuf)->queue); - - if (!list_empty(&pcdev->capture)) - pcdev->active = &list_entry(pcdev->capture.next, - struct sh_mobile_ceu_buffer, queue)->vb; - else - pcdev->active = NULL; - - ret = sh_mobile_ceu_capture(pcdev); - vbuf->vb2_buf.timestamp = ktime_get_ns(); - if (!ret) { - vbuf->field = pcdev->field; - vbuf->sequence = pcdev->sequence++; - } - vb2_buffer_done(&vbuf->vb2_buf, - ret < 0 ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); - -out: - spin_unlock(&pcdev->lock); - - return IRQ_HANDLED; -} - -static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) -{ - dev_info(icd->parent, - "SuperH Mobile CEU driver attached to camera %d\n", - icd->devnum); - - return 0; -} - -static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) -{ - dev_info(icd->parent, - "SuperH Mobile CEU driver detached from camera %d\n", - icd->devnum); -} - -/* Called with .host_lock held */ -static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici) -{ - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - pm_runtime_get_sync(ici->v4l2_dev.dev); - - pcdev->buf_total = 0; - - sh_mobile_ceu_soft_reset(pcdev); - - return 0; -} - -/* Called with .host_lock held */ -static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici) -{ - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - /* disable capture, disable interrupts */ - ceu_write(pcdev, CEIER, 0); - sh_mobile_ceu_soft_reset(pcdev); - - /* make sure active buffer is canceled */ - spin_lock_irq(&pcdev->lock); - if (pcdev->active) { - list_del_init(&to_ceu_vb(pcdev->active)->queue); - vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR); - pcdev->active = NULL; - } - spin_unlock_irq(&pcdev->lock); - - pm_runtime_put(ici->v4l2_dev.dev); -} - -/* - * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)" - * in SH7722 Hardware Manual - */ -static unsigned int size_dst(unsigned int src, unsigned int scale) -{ - unsigned int mant_pre = scale >> 12; - if (!src || !scale) - return src; - return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) * - mant_pre * 4096 / scale + 1; -} - -static u16 calc_scale(unsigned int src, unsigned int *dst) -{ - u16 scale; - - if (src == *dst) - return 0; - - scale = (src * 4096 / *dst) & ~7; - - while (scale > 4096 && size_dst(src, scale) < *dst) - scale -= 8; - - *dst = size_dst(src, scale); - - return scale; -} - -/* rect is guaranteed to not exceed the scaled camera rectangle */ -static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct sh_mobile_ceu_dev *pcdev = ici->priv; - unsigned int height, width, cdwdr_width, in_width, in_height; - unsigned int left_offset, top_offset; - u32 camor; - - dev_geo(icd->parent, "Crop %ux%u@%u:%u\n", - icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); - - left_offset = cam->ceu_left; - top_offset = cam->ceu_top; - - WARN_ON(icd->user_width & 3 || icd->user_height & 3); - - width = icd->user_width; - - if (pcdev->image_mode) { - in_width = cam->width; - if (!pcdev->is_16bit) { - in_width *= 2; - left_offset *= 2; - } - } else { - unsigned int w_factor; - - switch (icd->current_fmt->host_fmt->packing) { - case SOC_MBUS_PACKING_2X8_PADHI: - w_factor = 2; - break; - default: - w_factor = 1; - } - - in_width = cam->width * w_factor; - left_offset *= w_factor; - } - - cdwdr_width = icd->bytesperline; - - height = icd->user_height; - in_height = cam->height; - if (V4L2_FIELD_NONE != pcdev->field) { - height = (height / 2) & ~3; - in_height /= 2; - top_offset /= 2; - cdwdr_width *= 2; - } - - /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */ - camor = left_offset | (top_offset << 16); - - dev_geo(icd->parent, - "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor, - (in_height << 16) | in_width, (height << 16) | width, - cdwdr_width); - - ceu_write(pcdev, CAMOR, camor); - ceu_write(pcdev, CAPWR, 
(in_height << 16) | in_width); - /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */ - ceu_write(pcdev, CFSZR, (height << 16) | width); - ceu_write(pcdev, CDWDR, cdwdr_width); -} - -static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev) -{ - u32 capsr = ceu_read(pcdev, CAPSR); - ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */ - return capsr; -} - -static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr) -{ - unsigned long timeout = jiffies + 10 * HZ; - - /* - * Wait until the end of the current frame. It can take a long time, - * but if it has been aborted by a CAPSR reset, it shoule exit sooner. - */ - while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout)) - msleep(1); - - if (time_after(jiffies, timeout)) { - dev_err(pcdev->ici.v4l2_dev.dev, - "Timeout waiting for frame end! Interface problem?\n"); - return; - } - - /* Wait until reset clears, this shall not hang... */ - while (ceu_read(pcdev, CAPSR) & (1 << 16)) - udelay(10); - - /* Anything to restore? */ - if (capsr & ~(1 << 16)) - ceu_write(pcdev, CAPSR, capsr); -} - -#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \ - V4L2_MBUS_PCLK_SAMPLE_RISING | \ - V4L2_MBUS_HSYNC_ACTIVE_HIGH | \ - V4L2_MBUS_HSYNC_ACTIVE_LOW | \ - V4L2_MBUS_VSYNC_ACTIVE_HIGH | \ - V4L2_MBUS_VSYNC_ACTIVE_LOW | \ - V4L2_MBUS_DATA_ACTIVE_HIGH) - -/* Capture is not running, no interrupts, no locking needed */ -static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; - unsigned long value, common_flags = CEU_BUS_FLAGS; - u32 capsr = capture_save_reset(pcdev); - unsigned int yuv_lineskip; - int ret; - - /* - * If the client doesn't implement g_mbus_config, we just use our - * platform data - */ - ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); - if (!ret) { - common_flags = soc_mbus_config_compatible(&cfg, - common_flags); - if (!common_flags) - return -EINVAL; - } else if (ret != -ENOIOCTLCMD) { - return ret; - } - - /* Make choises, based on platform preferences */ - if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) && - (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) { - if (pcdev->flags & SH_CEU_FLAG_HSYNC_LOW) - common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH; - else - common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW; - } - - if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) && - (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) { - if (pcdev->flags & SH_CEU_FLAG_VSYNC_LOW) - common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH; - else - common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW; - } - - cfg.flags = common_flags; - ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg); - if (ret < 0 && ret != -ENOIOCTLCMD) - return ret; - - if (icd->current_fmt->host_fmt->bits_per_sample > 8) - pcdev->is_16bit = 1; - else - pcdev->is_16bit = 0; - - ceu_write(pcdev, CRCNTR, 0); - ceu_write(pcdev, CRCMPR, 0); - - value = 0x00000010; /* data fetch by default */ - yuv_lineskip = 0x10; - - switch (icd->current_fmt->host_fmt->fourcc) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - /* convert 4:2:2 -> 4:2:0 */ - yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */ - /* fall-through */ - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - switch (cam->code) { - case MEDIA_BUS_FMT_UYVY8_2X8: - value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */ - 
break; - case MEDIA_BUS_FMT_VYUY8_2X8: - value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */ - break; - case MEDIA_BUS_FMT_YUYV8_2X8: - value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */ - break; - case MEDIA_BUS_FMT_YVYU8_2X8: - value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */ - break; - default: - BUG(); - } - } - - if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 || - icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61) - value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */ - - value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0; - value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0; - - if (pcdev->is_16bit) - value |= 1 << 12; - else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT) - value |= 2 << 12; - - ceu_write(pcdev, CAMCR, value); - - ceu_write(pcdev, CAPCR, 0x00300000); - - switch (pcdev->field) { - case V4L2_FIELD_INTERLACED_TB: - value = 0x101; - break; - case V4L2_FIELD_INTERLACED_BT: - value = 0x102; - break; - default: - value = 0; - break; - } - ceu_write(pcdev, CAIFR, value); - - sh_mobile_ceu_set_rect(icd); - mdelay(1); - - dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr); - ceu_write(pcdev, CFLCR, pcdev->cflcr); - - /* - * A few words about byte order (observed in Big Endian mode) - * - * In data fetch mode bytes are received in chunks of 8 bytes. - * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first) - * - * The data is however by default written to memory in reverse order: - * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte) - * - * The lowest three bits of CDOCR allows us to do swapping, - * using 7 we swap the data bytes to match the incoming order: - * D0, D1, D2, D3, D4, D5, D6, D7 - */ - value = 0x00000007 | yuv_lineskip; - - ceu_write(pcdev, CDOCR, value); - ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */ - - capture_restore(pcdev, capsr); - - /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */ - return 0; -} - -static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd, - unsigned char buswidth) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - unsigned long common_flags = CEU_BUS_FLAGS; - struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; - int ret; - - ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); - if (!ret) - common_flags = soc_mbus_config_compatible(&cfg, - common_flags); - else if (ret != -ENOIOCTLCMD) - return ret; - - if (!common_flags || buswidth > 16) - return -EINVAL; - - return 0; -} - -static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = { - { - .fourcc = V4L2_PIX_FMT_NV12, - .name = "NV12", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_1_5X8, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C, - }, { - .fourcc = V4L2_PIX_FMT_NV21, - .name = "NV21", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_1_5X8, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C, - }, { - .fourcc = V4L2_PIX_FMT_NV16, - .name = "NV16", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C, - }, { - .fourcc = V4L2_PIX_FMT_NV61, - .name = "NV61", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C, - }, -}; - -/* This will be corrected as we get more formats */ -static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) -{ - return fmt->packing == SOC_MBUS_PACKING_NONE || - (fmt->bits_per_sample == 8 && - fmt->packing == 
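A side note on the packing factors in the format table above: SOC_MBUS_PACKING_1_5X8 (NV12/NV21) corresponds to 1.5 bytes per pixel, SOC_MBUS_PACKING_2X8_PADHI (NV16/NV61) to 2 bytes per pixel. The sketch below is only the plain buffer-size arithmetic behind those factors, not the driver's soc_mbus helpers; 640x480 is an arbitrary example frame.

#include <stdio.h>

int main(void)
{
        unsigned int width = 640, height = 480;
        unsigned int y_plane = width * height;  /* one byte per Y sample */

        /* NV12/NV21: Y plane + half-height interleaved CbCr plane */
        printf("NV12/NV21: %u bytes\n", y_plane + y_plane / 2);  /* 460800 */
        /* NV16/NV61: Y plane + full-height interleaved CbCr plane */
        printf("NV16/NV61: %u bytes\n", y_plane * 2);            /* 614400 */
        return 0;
}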
SOC_MBUS_PACKING_1_5X8) || - (fmt->bits_per_sample == 8 && - fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || - (fmt->bits_per_sample > 8 && - fmt->packing == SOC_MBUS_PACKING_EXTEND16); -} - -static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl) -{ - return container_of(ctrl->handler, struct soc_camera_device, - ctrl_handler); -} - -static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct soc_camera_device *icd = ctrl_to_icd(ctrl); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - switch (ctrl->id) { - case V4L2_CID_SHARPNESS: - switch (icd->current_fmt->host_fmt->fourcc) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - ceu_write(pcdev, CLFCR, !ctrl->val); - return 0; - } - break; - } - - return -EINVAL; -} - -static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = { - .s_ctrl = sh_mobile_ceu_s_ctrl, -}; - -static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx, - struct soc_camera_format_xlate *xlate) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct device *dev = icd->parent; - struct soc_camera_host *ici = to_soc_camera_host(dev); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - int ret, k, n; - int formats = 0; - struct sh_mobile_ceu_cam *cam; - struct v4l2_subdev_mbus_code_enum code = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .index = idx, - }; - const struct soc_mbus_pixelfmt *fmt; - - ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code); - if (ret < 0) - /* No more formats */ - return 0; - - fmt = soc_mbus_get_fmtdesc(code.code); - if (!fmt) { - dev_warn(dev, "unsupported format code #%u: %d\n", idx, code.code); - return 0; - } - - ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); - if (ret < 0) - return 0; - - if (!icd->host_priv) { - struct v4l2_subdev_format fmt = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - struct v4l2_mbus_framefmt *mf = &fmt.format; - struct v4l2_rect rect; - int shift = 0; - - /* Add our control */ - v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops, - V4L2_CID_SHARPNESS, 0, 1, 1, 1); - if (icd->ctrl_handler.error) - return icd->ctrl_handler.error; - - /* FIXME: subwindow is lost between close / open */ - - /* Cache current client geometry */ - ret = soc_camera_client_g_rect(sd, &rect); - if (ret < 0) - return ret; - - /* First time */ - ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); - if (ret < 0) - return ret; - - /* - * All currently existing CEU implementations support 2560x1920 - * or larger frames. If the sensor is proposing too big a frame, - * don't bother with possibly supportred by the CEU larger - * sizes, just try VGA multiples. If needed, this can be - * adjusted in the future. 
- */ - while ((mf->width > pcdev->max_width || - mf->height > pcdev->max_height) && shift < 4) { - /* Try 2560x1920, 1280x960, 640x480, 320x240 */ - mf->width = 2560 >> shift; - mf->height = 1920 >> shift; - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, NULL, &fmt); - if (ret < 0) - return ret; - shift++; - } - - if (shift == 4) { - dev_err(dev, "Failed to configure the client below %ux%x\n", - mf->width, mf->height); - return -EIO; - } - - dev_geo(dev, "camera fmt %ux%u\n", mf->width, mf->height); - - cam = kzalloc(sizeof(*cam), GFP_KERNEL); - if (!cam) - return -ENOMEM; - - /* We are called with current camera crop, initialise subrect with it */ - cam->rect = rect; - cam->subrect = rect; - - cam->width = mf->width; - cam->height = mf->height; - - icd->host_priv = cam; - } else { - cam = icd->host_priv; - } - - /* Beginning of a pass */ - if (!idx) - cam->extra_fmt = NULL; - - switch (code.code) { - case MEDIA_BUS_FMT_UYVY8_2X8: - case MEDIA_BUS_FMT_VYUY8_2X8: - case MEDIA_BUS_FMT_YUYV8_2X8: - case MEDIA_BUS_FMT_YVYU8_2X8: - if (cam->extra_fmt) - break; - - /* - * Our case is simple so far: for any of the above four camera - * formats we add all our four synthesized NV* formats, so, - * just marking the device with a single flag suffices. If - * the format generation rules are more complex, you would have - * to actually hang your already added / counted formats onto - * the host_priv pointer and check whether the format you're - * going to add now is already there. - */ - cam->extra_fmt = sh_mobile_ceu_formats; - - n = ARRAY_SIZE(sh_mobile_ceu_formats); - formats += n; - for (k = 0; xlate && k < n; k++) { - xlate->host_fmt = &sh_mobile_ceu_formats[k]; - xlate->code = code.code; - xlate++; - dev_dbg(dev, "Providing format %s using code %d\n", - sh_mobile_ceu_formats[k].name, code.code); - } - break; - default: - if (!sh_mobile_ceu_packing_supported(fmt)) - return 0; - } - - /* Generic pass-through */ - formats++; - if (xlate) { - xlate->host_fmt = fmt; - xlate->code = code.code; - xlate++; - dev_dbg(dev, "Providing format %s in pass-through mode\n", - fmt->name); - } - - return formats; -} - -static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd) -{ - kfree(icd->host_priv); - icd->host_priv = NULL; -} - -#define scale_down(size, scale) soc_camera_shift_scale(size, 12, scale) -#define calc_generic_scale(in, out) soc_camera_calc_scale(in, 12, out) - -/* - * CEU can scale and crop, but we don't want to waste bandwidth and kill the - * framerate by always requesting the maximum image from the client. See - * Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst for a description of - * scaling and cropping algorithms and for the meaning of referenced here steps. 
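As a worked count for sh_mobile_ceu_get_formats() above: a sensor whose only media-bus code is MEDIA_BUS_FMT_YUYV8_2X8 ends up with five translations, the four synthesized NV formats plus the packed pass-through entry. A stand-alone sketch that just prints that list:

#include <stdio.h>

int main(void)
{
        static const char * const synthesized[] = {
                "NV12", "NV21", "NV16", "NV61"
        };
        unsigned int formats = 0, k;

        for (k = 0; k < 4; k++, formats++)
                printf("xlate %u: %s, generated from YUYV8_2X8\n",
                       formats, synthesized[k]);

        printf("xlate %u: packed pass-through of YUYV8_2X8\n", formats++);
        printf("total translations: %u\n", formats);
        return 0;
}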
- */ -static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct v4l2_rect *rect = &sel->r; - struct device *dev = icd->parent; - struct soc_camera_host *ici = to_soc_camera_host(dev); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct v4l2_selection cam_sel; - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct v4l2_rect *cam_rect = &cam_sel.r; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_format fmt = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - struct v4l2_mbus_framefmt *mf = &fmt.format; - unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v, - out_width, out_height; - int interm_width, interm_height; - u32 capsr, cflcr; - int ret; - - dev_geo(dev, "S_SELECTION(%ux%u@%u:%u)\n", rect->width, rect->height, - rect->left, rect->top); - - /* During camera cropping its output window can change too, stop CEU */ - capsr = capture_save_reset(pcdev); - dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); - - /* - * 1. - 2. Apply iterative camera S_SELECTION for new input window, read back - * actual camera rectangle. - */ - ret = soc_camera_client_s_selection(sd, sel, &cam_sel, - &cam->rect, &cam->subrect); - if (ret < 0) - return ret; - - dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n", - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top); - - /* On success cam_crop contains current camera crop */ - - /* 3. Retrieve camera output window */ - ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); - if (ret < 0) - return ret; - - if (mf->width > pcdev->max_width || mf->height > pcdev->max_height) - return -EINVAL; - - /* 4. Calculate camera scales */ - scale_cam_h = calc_generic_scale(cam_rect->width, mf->width); - scale_cam_v = calc_generic_scale(cam_rect->height, mf->height); - - /* Calculate intermediate window */ - interm_width = scale_down(rect->width, scale_cam_h); - interm_height = scale_down(rect->height, scale_cam_v); - - if (interm_width < icd->user_width) { - u32 new_scale_h; - - new_scale_h = calc_generic_scale(rect->width, icd->user_width); - - mf->width = scale_down(cam_rect->width, new_scale_h); - } - - if (interm_height < icd->user_height) { - u32 new_scale_v; - - new_scale_v = calc_generic_scale(rect->height, icd->user_height); - - mf->height = scale_down(cam_rect->height, new_scale_v); - } - - if (interm_width < icd->user_width || interm_height < icd->user_height) { - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, NULL, &fmt); - if (ret < 0) - return ret; - - dev_geo(dev, "New camera output %ux%u\n", mf->width, mf->height); - scale_cam_h = calc_generic_scale(cam_rect->width, mf->width); - scale_cam_v = calc_generic_scale(cam_rect->height, mf->height); - interm_width = scale_down(rect->width, scale_cam_h); - interm_height = scale_down(rect->height, scale_cam_v); - } - - /* Cache camera output window */ - cam->width = mf->width; - cam->height = mf->height; - - if (pcdev->image_mode) { - out_width = min(interm_width, icd->user_width); - out_height = min(interm_height, icd->user_height); - } else { - out_width = interm_width; - out_height = interm_height; - } - - /* - * 5. Calculate CEU scales from camera scales from results of (5) and - * the user window - */ - scale_ceu_h = calc_scale(interm_width, &out_width); - scale_ceu_v = calc_scale(interm_height, &out_height); - - dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v); - - /* Apply CEU scales. 
*/ - cflcr = scale_ceu_h | (scale_ceu_v << 16); - if (cflcr != pcdev->cflcr) { - pcdev->cflcr = cflcr; - ceu_write(pcdev, CFLCR, cflcr); - } - - icd->user_width = out_width & ~3; - icd->user_height = out_height & ~3; - /* Offsets are applied at the CEU scaling filter input */ - cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1; - cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1; - - /* 6. Use CEU cropping to crop to the new window. */ - sh_mobile_ceu_set_rect(icd); - - cam->subrect = *rect; - - dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n", - icd->user_width, icd->user_height, - cam->ceu_left, cam->ceu_top); - - /* Restore capture. The CE bit can be cleared by the hardware */ - if (pcdev->active) - capsr |= 1; - capture_restore(pcdev, capsr); - - /* Even if only camera cropping succeeded */ - return ret; -} - -static int sh_mobile_ceu_get_selection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct sh_mobile_ceu_cam *cam = icd->host_priv; - - sel->r = cam->subrect; - - return 0; -} - -/* Similar to set_crop multistage iterative algorithm */ -static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct device *dev = icd->parent; - struct soc_camera_host *ici = to_soc_camera_host(dev); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct v4l2_pix_format *pix = &f->fmt.pix; - struct v4l2_mbus_framefmt mf; - __u32 pixfmt = pix->pixelformat; - const struct soc_camera_format_xlate *xlate; - unsigned int ceu_sub_width = pcdev->max_width, - ceu_sub_height = pcdev->max_height; - u16 scale_v, scale_h; - int ret; - bool image_mode; - enum v4l2_field field; - - switch (pix->field) { - default: - pix->field = V4L2_FIELD_NONE; - /* fall-through */ - case V4L2_FIELD_INTERLACED_TB: - case V4L2_FIELD_INTERLACED_BT: - case V4L2_FIELD_NONE: - field = pix->field; - break; - case V4L2_FIELD_INTERLACED: - field = V4L2_FIELD_INTERLACED_TB; - break; - } - - xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); - if (!xlate) { - dev_warn(dev, "Format %x not found\n", pixfmt); - return -EINVAL; - } - - /* 1.-4. Calculate desired client output geometry */ - soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf, 12); - mf.field = pix->field; - mf.colorspace = pix->colorspace; - mf.code = xlate->code; - - switch (pixfmt) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - image_mode = true; - break; - default: - image_mode = false; - } - - dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code, - pix->width, pix->height); - - dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height); - - /* 5. - 9. */ - ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect, - &mf, &ceu_sub_width, &ceu_sub_height, - image_mode && V4L2_FIELD_NONE == field, 12); - - dev_geo(dev, "5-9: client scale return %d\n", ret); - - /* Done with the camera. Now see if we can improve the result */ - - dev_geo(dev, "fmt %ux%u, requested %ux%u\n", - mf.width, mf.height, pix->width, pix->height); - if (ret < 0) - return ret; - - if (mf.code != xlate->code) - return -EINVAL; - - /* 9. Prepare CEU crop */ - cam->width = mf.width; - cam->height = mf.height; - - /* 10. Use CEU scaling to scale to the requested user window. 
*/ - - /* We cannot scale up */ - if (pix->width > ceu_sub_width) - ceu_sub_width = pix->width; - - if (pix->height > ceu_sub_height) - ceu_sub_height = pix->height; - - pix->colorspace = mf.colorspace; - - if (image_mode) { - /* Scale pix->{width x height} down to width x height */ - scale_h = calc_scale(ceu_sub_width, &pix->width); - scale_v = calc_scale(ceu_sub_height, &pix->height); - } else { - pix->width = ceu_sub_width; - pix->height = ceu_sub_height; - scale_h = 0; - scale_v = 0; - } - - pcdev->cflcr = scale_h | (scale_v << 16); - - /* - * We have calculated CFLCR, the actual configuration will be performed - * in sh_mobile_ceu_set_bus_param() - */ - - dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", - ceu_sub_width, scale_h, pix->width, - ceu_sub_height, scale_v, pix->height); - - cam->code = xlate->code; - icd->current_fmt = xlate; - - pcdev->field = field; - pcdev->image_mode = image_mode; - - /* CFSZR requirement */ - pix->width &= ~3; - pix->height &= ~3; - - return 0; -} - -#define CEU_CHDW_MAX 8188U /* Maximum line stride */ - -static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - const struct soc_camera_format_xlate *xlate; - struct v4l2_pix_format *pix = &f->fmt.pix; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_pad_config pad_cfg; - struct v4l2_subdev_format format = { - .which = V4L2_SUBDEV_FORMAT_TRY, - }; - struct v4l2_mbus_framefmt *mf = &format.format; - __u32 pixfmt = pix->pixelformat; - int width, height; - int ret; - - dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n", - pixfmt, pix->width, pix->height); - - xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); - if (!xlate) { - xlate = icd->current_fmt; - dev_dbg(icd->parent, "Format %x not found, keeping %x\n", - pixfmt, xlate->host_fmt->fourcc); - pixfmt = xlate->host_fmt->fourcc; - pix->pixelformat = pixfmt; - pix->colorspace = icd->colorspace; - } - - /* FIXME: calculate using depth and bus width */ - - /* CFSZR requires height and width to be 4-pixel aligned */ - v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2, - &pix->height, 4, pcdev->max_height, 2, 0); - - width = pix->width; - height = pix->height; - - /* limit to sensor capabilities */ - mf->width = pix->width; - mf->height = pix->height; - mf->field = pix->field; - mf->code = xlate->code; - mf->colorspace = pix->colorspace; - - ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), - pad, set_fmt, &pad_cfg, &format); - if (ret < 0) - return ret; - - pix->width = mf->width; - pix->height = mf->height; - pix->field = mf->field; - pix->colorspace = mf->colorspace; - - switch (pixfmt) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - /* FIXME: check against rect_max after converting soc-camera */ - /* We can scale precisely, need a bigger image from camera */ - if (pix->width < width || pix->height < height) { - /* - * We presume, the sensor behaves sanely, i.e., if - * requested a bigger rectangle, it will not return a - * smaller one. - */ - mf->width = pcdev->max_width; - mf->height = pcdev->max_height; - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, &pad_cfg, &format); - if (ret < 0) { - /* Shouldn't actually happen... 
*/ - dev_err(icd->parent, - "FIXME: client try_fmt() = %d\n", ret); - return ret; - } - } - /* We will scale exactly */ - if (mf->width > width) - pix->width = width; - if (mf->height > height) - pix->height = height; - - pix->bytesperline = max(pix->bytesperline, pix->width); - pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX); - pix->bytesperline &= ~3; - break; - - default: - /* Configurable stride isn't supported in pass-through mode. */ - pix->bytesperline = 0; - } - - pix->width &= ~3; - pix->height &= ~3; - pix->sizeimage = 0; - - dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n", - __func__, ret, pix->pixelformat, pix->width, pix->height); - - return ret; -} - -static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - u32 out_width = icd->user_width, out_height = icd->user_height; - int ret; - - /* Freeze queue */ - pcdev->frozen = 1; - /* Wait for frame */ - ret = wait_for_completion_interruptible(&pcdev->complete); - /* Stop the client */ - ret = v4l2_subdev_call(sd, video, s_stream, 0); - if (ret < 0) - dev_warn(icd->parent, - "Client failed to stop the stream: %d\n", ret); - else - /* Do the crop, if it fails, there's nothing more we can do */ - sh_mobile_ceu_set_selection(icd, sel); - - dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height); - - if (icd->user_width != out_width || icd->user_height != out_height) { - struct v4l2_format f = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .fmt.pix = { - .width = out_width, - .height = out_height, - .pixelformat = icd->current_fmt->host_fmt->fourcc, - .field = pcdev->field, - .colorspace = icd->colorspace, - }, - }; - ret = sh_mobile_ceu_set_fmt(icd, &f); - if (!ret && (out_width != f.fmt.pix.width || - out_height != f.fmt.pix.height)) - ret = -EINVAL; - if (!ret) { - icd->user_width = out_width & ~3; - icd->user_height = out_height & ~3; - ret = sh_mobile_ceu_set_bus_param(icd); - } - } - - /* Thaw the queue */ - pcdev->frozen = 0; - spin_lock_irq(&pcdev->lock); - sh_mobile_ceu_capture(pcdev); - spin_unlock_irq(&pcdev->lock); - /* Start the client */ - ret = v4l2_subdev_call(sd, video, s_stream, 1); - return ret; -} - -static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt) -{ - struct soc_camera_device *icd = file->private_data; - - return vb2_poll(&icd->vb2_vidq, file, pt); -} - -static int sh_mobile_ceu_querycap(struct soc_camera_host *ici, - struct v4l2_capability *cap) -{ - strscpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); - strscpy(cap->driver, "sh_mobile_ceu", sizeof(cap->driver)); - strscpy(cap->bus_info, "platform:sh_mobile_ceu", sizeof(cap->bus_info)); - cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; - cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; - - return 0; -} - -static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q, - struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - q->io_modes = VB2_MMAP | VB2_USERPTR; - q->drv_priv = icd; - q->ops = &sh_mobile_ceu_videobuf_ops; - q->mem_ops = &vb2_dma_contig_memops; - q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer); - q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - q->lock = &ici->host_lock; - q->dev = ici->v4l2_dev.dev; - - return vb2_queue_init(q); -} - 
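The vb2 queue configured in sh_mobile_ceu_init_videobuf() above is driven from user space through the soc_camera ioctl handlers further below: VIDIOC_REQBUFS, VIDIOC_QBUF, VIDIOC_STREAMON and VIDIOC_DQBUF end up in vb2_reqbufs(), vb2_qbuf(), vb2_streamon() and vb2_dqbuf(). Below is a minimal user-space sketch of that sequence; the device node, resolution, pixel format and buffer count are arbitrary, and error handling plus buffer mapping with mmap() are omitted.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
        struct v4l2_requestbuffers req = {
                .count  = 4,
                .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                .memory = V4L2_MEMORY_MMAP,
        };
        struct v4l2_buffer buf;
        enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        unsigned int i;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0)
                return 1;

        fmt.fmt.pix.width = 640;
        fmt.fmt.pix.height = 480;
        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
        ioctl(fd, VIDIOC_S_FMT, &fmt);     /* -> soc_camera_s_fmt_vid_cap() */

        ioctl(fd, VIDIOC_REQBUFS, &req);   /* -> soc_camera_reqbufs() -> vb2_reqbufs() */

        for (i = 0; i < req.count; i++) {
                memset(&buf, 0, sizeof(buf));
                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory = V4L2_MEMORY_MMAP;
                buf.index = i;
                ioctl(fd, VIDIOC_QBUF, &buf);   /* -> vb2_qbuf() */
        }

        ioctl(fd, VIDIOC_STREAMON, &type); /* starts the sensor and the CEU */

        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        ioctl(fd, VIDIOC_DQBUF, &buf);     /* blocks until one frame is captured */
        printf("frame: %u bytes in buffer %u\n", buf.bytesused, buf.index);

        ioctl(fd, VIDIOC_STREAMOFF, &type);
        close(fd);
        return 0;
}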
-static struct soc_camera_host_ops sh_mobile_ceu_host_ops = { - .owner = THIS_MODULE, - .add = sh_mobile_ceu_add_device, - .remove = sh_mobile_ceu_remove_device, - .clock_start = sh_mobile_ceu_clock_start, - .clock_stop = sh_mobile_ceu_clock_stop, - .get_formats = sh_mobile_ceu_get_formats, - .put_formats = sh_mobile_ceu_put_formats, - .get_selection = sh_mobile_ceu_get_selection, - .set_selection = sh_mobile_ceu_set_selection, - .set_liveselection = sh_mobile_ceu_set_liveselection, - .set_fmt = sh_mobile_ceu_set_fmt, - .try_fmt = sh_mobile_ceu_try_fmt, - .poll = sh_mobile_ceu_poll, - .querycap = sh_mobile_ceu_querycap, - .set_bus_param = sh_mobile_ceu_set_bus_param, - .init_videobuf2 = sh_mobile_ceu_init_videobuf, -}; - -struct bus_wait { - struct notifier_block notifier; - struct completion completion; - struct device *dev; -}; - -static int bus_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - struct bus_wait *wait = container_of(nb, struct bus_wait, notifier); - - if (wait->dev != dev) - return NOTIFY_DONE; - - switch (action) { - case BUS_NOTIFY_UNBOUND_DRIVER: - /* Protect from module unloading */ - wait_for_completion(&wait->completion); - return NOTIFY_OK; - } - return NOTIFY_DONE; -} - -static int sh_mobile_ceu_probe(struct platform_device *pdev) -{ - struct sh_mobile_ceu_dev *pcdev; - struct resource *res; - void __iomem *base; - unsigned int irq; - int err; - struct bus_wait wait = { - .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion), - .notifier.notifier_call = bus_notify, - }; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(pdev, 0); - if (!res || (int)irq <= 0) { - dev_err(&pdev->dev, "Not enough CEU platform resources.\n"); - return -ENODEV; - } - - pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL); - if (!pcdev) { - dev_err(&pdev->dev, "Could not allocate pcdev\n"); - return -ENOMEM; - } - - INIT_LIST_HEAD(&pcdev->capture); - spin_lock_init(&pcdev->lock); - init_completion(&pcdev->complete); - - pcdev->pdata = pdev->dev.platform_data; - if (!pcdev->pdata && !pdev->dev.of_node) { - dev_err(&pdev->dev, "CEU platform data not set.\n"); - return -EINVAL; - } - - /* TODO: implement per-device bus flags */ - if (pcdev->pdata) { - pcdev->max_width = pcdev->pdata->max_width; - pcdev->max_height = pcdev->pdata->max_height; - pcdev->flags = pcdev->pdata->flags; - } - pcdev->field = V4L2_FIELD_NONE; - - if (!pcdev->max_width) { - unsigned int v; - err = of_property_read_u32(pdev->dev.of_node, "renesas,max-width", &v); - if (!err) - pcdev->max_width = v; - - if (!pcdev->max_width) - pcdev->max_width = 2560; - } - if (!pcdev->max_height) { - unsigned int v; - err = of_property_read_u32(pdev->dev.of_node, "renesas,max-height", &v); - if (!err) - pcdev->max_height = v; - - if (!pcdev->max_height) - pcdev->max_height = 1920; - } - - base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); - - pcdev->irq = irq; - pcdev->base = base; - pcdev->video_limit = 0; /* only enabled if second resource exists */ - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) { - err = dma_declare_coherent_memory(&pdev->dev, res->start, - res->start, - resource_size(res), - DMA_MEMORY_EXCLUSIVE); - if (err) { - dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); - return err; - } - - pcdev->video_limit = resource_size(res); - } - - /* request irq */ - err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq, - 0, dev_name(&pdev->dev), 
pcdev); - if (err) { - dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); - goto exit_release_mem; - } - - pm_suspend_ignore_children(&pdev->dev, true); - pm_runtime_enable(&pdev->dev); - pm_runtime_resume(&pdev->dev); - - pcdev->ici.priv = pcdev; - pcdev->ici.v4l2_dev.dev = &pdev->dev; - pcdev->ici.nr = pdev->id; - pcdev->ici.drv_name = dev_name(&pdev->dev); - pcdev->ici.ops = &sh_mobile_ceu_host_ops; - pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE; - - if (pcdev->pdata && pcdev->pdata->asd_sizes) { - pcdev->ici.asd = pcdev->pdata->asd; - pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes; - } - - err = soc_camera_host_register(&pcdev->ici); - if (err) - goto exit_free_clk; - - return 0; - -exit_free_clk: - pm_runtime_disable(&pdev->dev); -exit_release_mem: - if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) - dma_release_declared_memory(&pdev->dev); - return err; -} - -static int sh_mobile_ceu_remove(struct platform_device *pdev) -{ - struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); - - soc_camera_host_unregister(soc_host); - pm_runtime_disable(&pdev->dev); - if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) - dma_release_declared_memory(&pdev->dev); - - return 0; -} - -static int sh_mobile_ceu_runtime_nop(struct device *dev) -{ - /* Runtime PM callback shared between ->runtime_suspend() - * and ->runtime_resume(). Simply returns success. - * - * This driver re-initializes all registers after - * pm_runtime_get_sync() anyway so there is no need - * to save and restore registers here. - */ - return 0; -} - -static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { - .runtime_suspend = sh_mobile_ceu_runtime_nop, - .runtime_resume = sh_mobile_ceu_runtime_nop, -}; - -static const struct of_device_id sh_mobile_ceu_of_match[] = { - { .compatible = "renesas,sh-mobile-ceu" }, - { } -}; -MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match); - -static struct platform_driver sh_mobile_ceu_driver = { - .driver = { - .name = "sh_mobile_ceu", - .pm = &sh_mobile_ceu_dev_pm_ops, - .of_match_table = sh_mobile_ceu_of_match, - }, - .probe = sh_mobile_ceu_probe, - .remove = sh_mobile_ceu_remove, -}; - -module_platform_driver(sh_mobile_ceu_driver); - -MODULE_DESCRIPTION("SuperH Mobile CEU driver"); -MODULE_AUTHOR("Magnus Damm"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("0.1.0"); -MODULE_ALIAS("platform:sh_mobile_ceu"); diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c deleted file mode 100644 index 21034339cdcb..000000000000 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ /dev/null @@ -1,2170 +0,0 @@ -/* - * camera image capture (abstract) bus driver - * - * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> - * - * This driver provides an interface between platform-specific camera - * busses and camera devices. It should be used if the camera is - * connected not over a "proper" bus like PCI or USB, but over a - * special bus, like, for example, the Quick Capture interface on PXA270 - * SoCs. Later it should also be used for i.MX31 SoCs from Freescale. - * It can handle multiple cameras and / or multiple busses, which can - * be used, e.g., in stereo-vision applications. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/device.h> -#include <linux/err.h> -#include <linux/i2c.h> -#include <linux/init.h> -#include <linux/list.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/of_graph.h> -#include <linux/platform_device.h> -#include <linux/pm_runtime.h> -#include <linux/regulator/consumer.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> - -#include <media/soc_camera.h> -#include <media/drv-intf/soc_mediabus.h> -#include <media/v4l2-async.h> -#include <media/v4l2-clk.h> -#include <media/v4l2-common.h> -#include <media/v4l2-ioctl.h> -#include <media/v4l2-dev.h> -#include <media/v4l2-fwnode.h> -#include <media/videobuf2-v4l2.h> - -/* Default to VGA resolution */ -#define DEFAULT_WIDTH 640 -#define DEFAULT_HEIGHT 480 - -#define MAP_MAX_NUM 32 -static DECLARE_BITMAP(device_map, MAP_MAX_NUM); -static LIST_HEAD(hosts); -static LIST_HEAD(devices); -/* - * Protects lists and bitmaps of hosts and devices. - * Lock nesting: Ok to take ->host_lock under list_lock. - */ -static DEFINE_MUTEX(list_lock); - -struct soc_camera_async_client { - struct v4l2_async_subdev *sensor; - struct v4l2_async_notifier notifier; - struct platform_device *pdev; - struct list_head list; /* needed for clean up */ -}; - -static int soc_camera_video_start(struct soc_camera_device *icd); -static int video_dev_create(struct soc_camera_device *icd); - -int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd, - struct v4l2_clk *clk) -{ - int ret; - bool clock_toggle; - - if (clk && (!ssdd->unbalanced_power || - !test_and_set_bit(0, &ssdd->clock_state))) { - ret = v4l2_clk_enable(clk); - if (ret < 0) { - dev_err(dev, "Cannot enable clock: %d\n", ret); - return ret; - } - clock_toggle = true; - } else { - clock_toggle = false; - } - - ret = regulator_bulk_enable(ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); - if (ret < 0) { - dev_err(dev, "Cannot enable regulators\n"); - goto eregenable; - } - - if (ssdd->power) { - ret = ssdd->power(dev, 1); - if (ret < 0) { - dev_err(dev, - "Platform failed to power-on the camera.\n"); - goto epwron; - } - } - - return 0; - -epwron: - regulator_bulk_disable(ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); -eregenable: - if (clock_toggle) - v4l2_clk_disable(clk); - - return ret; -} -EXPORT_SYMBOL(soc_camera_power_on); - -int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd, - struct v4l2_clk *clk) -{ - int ret = 0; - int err; - - if (ssdd->power) { - err = ssdd->power(dev, 0); - if (err < 0) { - dev_err(dev, - "Platform failed to power-off the camera.\n"); - ret = err; - } - } - - err = regulator_bulk_disable(ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); - if (err < 0) { - dev_err(dev, "Cannot disable regulators\n"); - ret = ret ? 
: err; - } - - if (clk && (!ssdd->unbalanced_power || test_and_clear_bit(0, &ssdd->clock_state))) - v4l2_clk_disable(clk); - - return ret; -} -EXPORT_SYMBOL(soc_camera_power_off); - -int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd) -{ - /* Should not have any effect in synchronous case */ - return devm_regulator_bulk_get(dev, ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); -} -EXPORT_SYMBOL(soc_camera_power_init); - -static int __soc_camera_power_on(struct soc_camera_device *icd) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - int ret; - - ret = v4l2_subdev_call(sd, core, s_power, 1); - if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) - return ret; - - return 0; -} - -static int __soc_camera_power_off(struct soc_camera_device *icd) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - int ret; - - ret = v4l2_subdev_call(sd, core, s_power, 0); - if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) - return ret; - - return 0; -} - -static int soc_camera_clock_start(struct soc_camera_host *ici) -{ - int ret; - - if (!ici->ops->clock_start) - return 0; - - mutex_lock(&ici->clk_lock); - ret = ici->ops->clock_start(ici); - mutex_unlock(&ici->clk_lock); - - return ret; -} - -static void soc_camera_clock_stop(struct soc_camera_host *ici) -{ - if (!ici->ops->clock_stop) - return; - - mutex_lock(&ici->clk_lock); - ici->ops->clock_stop(ici); - mutex_unlock(&ici->clk_lock); -} - -const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( - struct soc_camera_device *icd, unsigned int fourcc) -{ - unsigned int i; - - for (i = 0; i < icd->num_user_formats; i++) - if (icd->user_formats[i].host_fmt->fourcc == fourcc) - return icd->user_formats + i; - return NULL; -} -EXPORT_SYMBOL(soc_camera_xlate_by_fourcc); - -/** - * soc_camera_apply_board_flags() - apply platform SOCAM_SENSOR_INVERT_* flags - * @ssdd: camera platform parameters - * @cfg: media bus configuration - * @return: resulting flags - */ -unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd, - const struct v4l2_mbus_config *cfg) -{ - unsigned long f, flags = cfg->flags; - - /* If only one of the two polarities is supported, switch to the opposite */ - if (ssdd->flags & SOCAM_SENSOR_INVERT_HSYNC) { - f = flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW); - if (f == V4L2_MBUS_HSYNC_ACTIVE_HIGH || f == V4L2_MBUS_HSYNC_ACTIVE_LOW) - flags ^= V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW; - } - - if (ssdd->flags & SOCAM_SENSOR_INVERT_VSYNC) { - f = flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW); - if (f == V4L2_MBUS_VSYNC_ACTIVE_HIGH || f == V4L2_MBUS_VSYNC_ACTIVE_LOW) - flags ^= V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW; - } - - if (ssdd->flags & SOCAM_SENSOR_INVERT_PCLK) { - f = flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING); - if (f == V4L2_MBUS_PCLK_SAMPLE_RISING || f == V4L2_MBUS_PCLK_SAMPLE_FALLING) - flags ^= V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING; - } - - return flags; -} -EXPORT_SYMBOL(soc_camera_apply_board_flags); - -#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ - ((x) >> 24) & 0xff - -static int soc_camera_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - const struct soc_camera_format_xlate *xlate; - struct v4l2_pix_format *pix = &f->fmt.pix; - int ret; - - dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n", - 
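To make the inversion rule in soc_camera_apply_board_flags() above concrete: when only one polarity of a signal is reported and the board sets the matching SOCAM_SENSOR_INVERT_* flag, the XOR flips it to the opposite polarity. A tiny stand-alone illustration for HSYNC; the two bit values are local stand-ins rather than the real V4L2_MBUS_* constants.

#include <stdio.h>

#define HSYNC_ACTIVE_HIGH (1u << 0)     /* local stand-in, not the kernel bit */
#define HSYNC_ACTIVE_LOW  (1u << 1)     /* local stand-in, not the kernel bit */

int main(void)
{
        unsigned long flags = HSYNC_ACTIVE_HIGH;  /* sensor: active-high only */
        unsigned long f = flags & (HSYNC_ACTIVE_HIGH | HSYNC_ACTIVE_LOW);

        /* board has SOCAM_SENSOR_INVERT_HSYNC set: flip to the other polarity */
        if (f == HSYNC_ACTIVE_HIGH || f == HSYNC_ACTIVE_LOW)
                flags ^= HSYNC_ACTIVE_HIGH | HSYNC_ACTIVE_LOW;

        printf("HSYNC is now %s\n",
               (flags & HSYNC_ACTIVE_LOW) ? "active-low" : "active-high");
        return 0;
}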
pixfmtstr(pix->pixelformat), pix->width, pix->height); - - if (pix->pixelformat != V4L2_PIX_FMT_JPEG && - !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { - pix->bytesperline = 0; - pix->sizeimage = 0; - } - - ret = ici->ops->try_fmt(icd, f); - if (ret < 0) - return ret; - - xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); - if (!xlate) - return -EINVAL; - - ret = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt); - if (ret < 0) - return ret; - - pix->bytesperline = max_t(u32, pix->bytesperline, ret); - - ret = soc_mbus_image_size(xlate->host_fmt, pix->bytesperline, - pix->height); - if (ret < 0) - return ret; - - pix->sizeimage = max_t(u32, pix->sizeimage, ret); - - return 0; -} - -static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) -{ - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - /* Only single-plane capture is supported so far */ - if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - /* limit format to hardware capabilities */ - return soc_camera_try_fmt(icd, f); -} - -static int soc_camera_enum_input(struct file *file, void *priv, - struct v4l2_input *inp) -{ - struct soc_camera_device *icd = file->private_data; - - if (inp->index != 0) - return -EINVAL; - - /* default is camera */ - inp->type = V4L2_INPUT_TYPE_CAMERA; - inp->std = icd->vdev->tvnorms; - strscpy(inp->name, "Camera", sizeof(inp->name)); - - return 0; -} - -static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i) -{ - *i = 0; - - return 0; -} - -static int soc_camera_s_input(struct file *file, void *priv, unsigned int i) -{ - if (i > 0) - return -EINVAL; - - return 0; -} - -static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id a) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - - return v4l2_subdev_call(sd, video, s_std, a); -} - -static int soc_camera_g_std(struct file *file, void *priv, v4l2_std_id *a) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - - return v4l2_subdev_call(sd, video, g_std, a); -} - -static int soc_camera_enum_framesizes(struct file *file, void *fh, - struct v4l2_frmsizeenum *fsize) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - return ici->ops->enum_framesizes(icd, fsize); -} - -static int soc_camera_reqbufs(struct file *file, void *priv, - struct v4l2_requestbuffers *p) -{ - int ret; - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - if (icd->streamer && icd->streamer != file) - return -EBUSY; - - ret = vb2_reqbufs(&icd->vb2_vidq, p); - if (!ret) - icd->streamer = p->count ? 
file : NULL; - return ret; -} - -static int soc_camera_querybuf(struct file *file, void *priv, - struct v4l2_buffer *p) -{ - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - return vb2_querybuf(&icd->vb2_vidq, p); -} - -static int soc_camera_qbuf(struct file *file, void *priv, - struct v4l2_buffer *p) -{ - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - if (icd->streamer != file) - return -EBUSY; - - return vb2_qbuf(&icd->vb2_vidq, NULL, p); -} - -static int soc_camera_dqbuf(struct file *file, void *priv, - struct v4l2_buffer *p) -{ - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - if (icd->streamer != file) - return -EBUSY; - - return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK); -} - -static int soc_camera_create_bufs(struct file *file, void *priv, - struct v4l2_create_buffers *create) -{ - struct soc_camera_device *icd = file->private_data; - int ret; - - if (icd->streamer && icd->streamer != file) - return -EBUSY; - - ret = vb2_create_bufs(&icd->vb2_vidq, create); - if (!ret) - icd->streamer = file; - return ret; -} - -static int soc_camera_prepare_buf(struct file *file, void *priv, - struct v4l2_buffer *b) -{ - struct soc_camera_device *icd = file->private_data; - - return vb2_prepare_buf(&icd->vb2_vidq, NULL, b); -} - -static int soc_camera_expbuf(struct file *file, void *priv, - struct v4l2_exportbuffer *p) -{ - struct soc_camera_device *icd = file->private_data; - - if (icd->streamer && icd->streamer != file) - return -EBUSY; - return vb2_expbuf(&icd->vb2_vidq, p); -} - -/* Always entered with .host_lock held */ -static int soc_camera_init_user_formats(struct soc_camera_device *icd) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - unsigned int i, fmts = 0, raw_fmts = 0; - int ret; - struct v4l2_subdev_mbus_code_enum code = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - - while (!v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code)) { - raw_fmts++; - code.index++; - } - - if (!ici->ops->get_formats) - /* - * Fallback mode - the host will have to serve all - * sensor-provided formats one-to-one to the user - */ - fmts = raw_fmts; - else - /* - * First pass - only count formats this host-sensor - * configuration can provide - */ - for (i = 0; i < raw_fmts; i++) { - ret = ici->ops->get_formats(icd, i, NULL); - if (ret < 0) - return ret; - fmts += ret; - } - - if (!fmts) - return -ENXIO; - - icd->user_formats = - vmalloc(array_size(fmts, - sizeof(struct soc_camera_format_xlate))); - if (!icd->user_formats) - return -ENOMEM; - - dev_dbg(icd->pdev, "Found %d supported formats.\n", fmts); - - /* Second pass - actually fill data formats */ - fmts = 0; - for (i = 0; i < raw_fmts; i++) - if (!ici->ops->get_formats) { - code.index = i; - v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code); - icd->user_formats[fmts].host_fmt = - soc_mbus_get_fmtdesc(code.code); - if (icd->user_formats[fmts].host_fmt) - icd->user_formats[fmts++].code = code.code; - } else { - ret = ici->ops->get_formats(icd, i, - &icd->user_formats[fmts]); - if (ret < 0) - goto egfmt; - fmts += ret; - } - - icd->num_user_formats = fmts; - icd->current_fmt = &icd->user_formats[0]; - - return 0; - -egfmt: - vfree(icd->user_formats); - return ret; -} - -/* Always entered with .host_lock held */ -static void soc_camera_free_user_formats(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici 
= to_soc_camera_host(icd->parent); - - if (ici->ops->put_formats) - ici->ops->put_formats(icd); - icd->current_fmt = NULL; - icd->num_user_formats = 0; - vfree(icd->user_formats); - icd->user_formats = NULL; -} - -/* Called with .vb_lock held, or from the first open(2), see comment there */ -static int soc_camera_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct v4l2_pix_format *pix = &f->fmt.pix; - int ret; - - dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n", - pixfmtstr(pix->pixelformat), pix->width, pix->height); - - /* We always call try_fmt() before set_fmt() or set_selection() */ - ret = soc_camera_try_fmt(icd, f); - if (ret < 0) - return ret; - - ret = ici->ops->set_fmt(icd, f); - if (ret < 0) { - return ret; - } else if (!icd->current_fmt || - icd->current_fmt->host_fmt->fourcc != pix->pixelformat) { - dev_err(icd->pdev, - "Host driver hasn't set up current format correctly!\n"); - return -EINVAL; - } - - icd->user_width = pix->width; - icd->user_height = pix->height; - icd->bytesperline = pix->bytesperline; - icd->sizeimage = pix->sizeimage; - icd->colorspace = pix->colorspace; - icd->field = pix->field; - - dev_dbg(icd->pdev, "set width: %d height: %d\n", - icd->user_width, icd->user_height); - - /* set physical bus parameters */ - return ici->ops->set_bus_param(icd); -} - -static int soc_camera_add_device(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - int ret; - - if (ici->icd) - return -EBUSY; - - if (!icd->clk) { - ret = soc_camera_clock_start(ici); - if (ret < 0) - return ret; - } - - if (ici->ops->add) { - ret = ici->ops->add(icd); - if (ret < 0) - goto eadd; - } - - ici->icd = icd; - - return 0; - -eadd: - if (!icd->clk) - soc_camera_clock_stop(ici); - return ret; -} - -static void soc_camera_remove_device(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - if (WARN_ON(icd != ici->icd)) - return; - - if (ici->ops->remove) - ici->ops->remove(icd); - if (!icd->clk) - soc_camera_clock_stop(ici); - ici->icd = NULL; -} - -static int soc_camera_open(struct file *file) -{ - struct video_device *vdev = video_devdata(file); - struct soc_camera_device *icd; - struct soc_camera_host *ici; - int ret; - - /* - * Don't mess with the host during probe: wait until the loop in - * scan_add_host() completes. Also protect against a race with - * soc_camera_host_unregister(). - */ - if (mutex_lock_interruptible(&list_lock)) - return -ERESTARTSYS; - - if (!vdev || !video_is_registered(vdev)) { - mutex_unlock(&list_lock); - return -ENODEV; - } - - icd = video_get_drvdata(vdev); - ici = to_soc_camera_host(icd->parent); - - ret = try_module_get(ici->ops->owner) ? 
0 : -ENODEV; - mutex_unlock(&list_lock); - - if (ret < 0) { - dev_err(icd->pdev, "Couldn't lock capture bus driver.\n"); - return ret; - } - - if (!to_soc_camera_control(icd)) { - /* No device driver attached */ - ret = -ENODEV; - goto econtrol; - } - - if (mutex_lock_interruptible(&ici->host_lock)) { - ret = -ERESTARTSYS; - goto elockhost; - } - icd->use_count++; - - /* Now we really have to activate the camera */ - if (icd->use_count == 1) { - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - /* Restore parameters before the last close() per V4L2 API */ - struct v4l2_format f = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .fmt.pix = { - .width = icd->user_width, - .height = icd->user_height, - .field = icd->field, - .colorspace = icd->colorspace, - .pixelformat = - icd->current_fmt->host_fmt->fourcc, - }, - }; - - /* The camera could have been already on, try to reset */ - if (sdesc->subdev_desc.reset) - if (icd->control) - sdesc->subdev_desc.reset(icd->control); - - ret = soc_camera_add_device(icd); - if (ret < 0) { - dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret); - goto eiciadd; - } - - ret = __soc_camera_power_on(icd); - if (ret < 0) - goto epower; - - pm_runtime_enable(&icd->vdev->dev); - ret = pm_runtime_resume(&icd->vdev->dev); - if (ret < 0 && ret != -ENOSYS) - goto eresume; - - /* - * Try to configure with default parameters. Notice: this is the - * very first open, so, we cannot race against other calls, - * apart from someone else calling open() simultaneously, but - * .host_lock is protecting us against it. - */ - ret = soc_camera_set_fmt(icd, &f); - if (ret < 0) - goto esfmt; - - ret = ici->ops->init_videobuf2(&icd->vb2_vidq, icd); - if (ret < 0) - goto einitvb; - v4l2_ctrl_handler_setup(&icd->ctrl_handler); - } - mutex_unlock(&ici->host_lock); - - file->private_data = icd; - dev_dbg(icd->pdev, "camera device open\n"); - - return 0; - - /* - * All errors are entered with the .host_lock held, first four also - * with use_count == 1 - */ -einitvb: -esfmt: - pm_runtime_disable(&icd->vdev->dev); -eresume: - __soc_camera_power_off(icd); -epower: - soc_camera_remove_device(icd); -eiciadd: - icd->use_count--; - mutex_unlock(&ici->host_lock); -elockhost: -econtrol: - module_put(ici->ops->owner); - - return ret; -} - -static int soc_camera_close(struct file *file) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - mutex_lock(&ici->host_lock); - if (icd->streamer == file) { - if (ici->ops->init_videobuf2) - vb2_queue_release(&icd->vb2_vidq); - icd->streamer = NULL; - } - icd->use_count--; - if (!icd->use_count) { - pm_runtime_suspend(&icd->vdev->dev); - pm_runtime_disable(&icd->vdev->dev); - - __soc_camera_power_off(icd); - - soc_camera_remove_device(icd); - } - - mutex_unlock(&ici->host_lock); - - module_put(ici->ops->owner); - - dev_dbg(icd->pdev, "camera device close\n"); - - return 0; -} - -static ssize_t soc_camera_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - dev_dbg(icd->pdev, "read called, buf %p\n", buf); - - if (ici->ops->init_videobuf2 && icd->vb2_vidq.io_modes & VB2_READ) - return vb2_read(&icd->vb2_vidq, buf, count, ppos, - file->f_flags & O_NONBLOCK); - - dev_err(icd->pdev, "camera device read not implemented\n"); - - return -EINVAL; -} - -static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct 
soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - int err; - - dev_dbg(icd->pdev, "mmap called, vma=%p\n", vma); - - if (icd->streamer != file) - return -EBUSY; - - if (mutex_lock_interruptible(&ici->host_lock)) - return -ERESTARTSYS; - err = vb2_mmap(&icd->vb2_vidq, vma); - mutex_unlock(&ici->host_lock); - - dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n", - (unsigned long)vma->vm_start, - (unsigned long)vma->vm_end - (unsigned long)vma->vm_start, - err); - - return err; -} - -static __poll_t soc_camera_poll(struct file *file, poll_table *pt) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - __poll_t res = EPOLLERR; - - if (icd->streamer != file) - return EPOLLERR; - - mutex_lock(&ici->host_lock); - res = ici->ops->poll(file, pt); - mutex_unlock(&ici->host_lock); - return res; -} - -static const struct v4l2_file_operations soc_camera_fops = { - .owner = THIS_MODULE, - .open = soc_camera_open, - .release = soc_camera_close, - .unlocked_ioctl = video_ioctl2, - .read = soc_camera_read, - .mmap = soc_camera_mmap, - .poll = soc_camera_poll, -}; - -static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) -{ - struct soc_camera_device *icd = file->private_data; - int ret; - - WARN_ON(priv != file->private_data); - - if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { - dev_warn(icd->pdev, "Wrong buf-type %d\n", f->type); - return -EINVAL; - } - - if (icd->streamer && icd->streamer != file) - return -EBUSY; - - if (vb2_is_streaming(&icd->vb2_vidq)) { - dev_err(icd->pdev, "S_FMT denied: queue initialised\n"); - return -EBUSY; - } - - ret = soc_camera_set_fmt(icd, f); - - if (!ret && !icd->streamer) - icd->streamer = file; - - return ret; -} - -static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_fmtdesc *f) -{ - struct soc_camera_device *icd = file->private_data; - const struct soc_mbus_pixelfmt *format; - - WARN_ON(priv != file->private_data); - - if (f->index >= icd->num_user_formats) - return -EINVAL; - - format = icd->user_formats[f->index].host_fmt; - - if (format->name) - strscpy(f->description, format->name, sizeof(f->description)); - f->pixelformat = format->fourcc; - return 0; -} - -static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_pix_format *pix = &f->fmt.pix; - - WARN_ON(priv != file->private_data); - - if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - pix->width = icd->user_width; - pix->height = icd->user_height; - pix->bytesperline = icd->bytesperline; - pix->sizeimage = icd->sizeimage; - pix->field = icd->field; - pix->pixelformat = icd->current_fmt->host_fmt->fourcc; - pix->colorspace = icd->colorspace; - dev_dbg(icd->pdev, "current_fmt->fourcc: 0x%08x\n", - icd->current_fmt->host_fmt->fourcc); - return 0; -} - -static int soc_camera_querycap(struct file *file, void *priv, - struct v4l2_capability *cap) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - WARN_ON(priv != file->private_data); - - strscpy(cap->driver, ici->drv_name, sizeof(cap->driver)); - return ici->ops->querycap(ici, cap); -} - -static int soc_camera_streamon(struct file *file, void *priv, - enum v4l2_buf_type i) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_subdev *sd = 
soc_camera_to_subdev(icd); - int ret; - - WARN_ON(priv != file->private_data); - - if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - if (icd->streamer != file) - return -EBUSY; - - /* This calls buf_queue from host driver's videobuf2_queue_ops */ - ret = vb2_streamon(&icd->vb2_vidq, i); - if (!ret) - v4l2_subdev_call(sd, video, s_stream, 1); - - return ret; -} - -static int soc_camera_streamoff(struct file *file, void *priv, - enum v4l2_buf_type i) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - int ret; - - WARN_ON(priv != file->private_data); - - if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - if (icd->streamer != file) - return -EBUSY; - - /* - * This calls buf_release from host driver's videobuf2_queue_ops for all - * remaining buffers. When the last buffer is freed, stop capture - */ - ret = vb2_streamoff(&icd->vb2_vidq, i); - - v4l2_subdev_call(sd, video, s_stream, 0); - - return ret; -} - -static int soc_camera_g_selection(struct file *file, void *fh, - struct v4l2_selection *s) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - /* With a wrong type no need to try to fall back to cropping */ - if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - return ici->ops->get_selection(icd, s); -} - -static int soc_camera_s_selection(struct file *file, void *fh, - struct v4l2_selection *s) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - int ret; - - /* In all these cases cropping emulation will not help */ - if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || - (s->target != V4L2_SEL_TGT_COMPOSE && - s->target != V4L2_SEL_TGT_CROP)) - return -EINVAL; - - if (s->target == V4L2_SEL_TGT_COMPOSE) { - /* No output size change during a running capture! 
*/ - if (vb2_is_streaming(&icd->vb2_vidq) && - (icd->user_width != s->r.width || - icd->user_height != s->r.height)) - return -EBUSY; - - /* - * Only one user is allowed to change the output format, touch - * buffers, start / stop streaming, poll for data - */ - if (icd->streamer && icd->streamer != file) - return -EBUSY; - } - - if (s->target == V4L2_SEL_TGT_CROP && - vb2_is_streaming(&icd->vb2_vidq) && - ici->ops->set_liveselection) - ret = ici->ops->set_liveselection(icd, s); - else - ret = ici->ops->set_selection(icd, s); - if (!ret && - s->target == V4L2_SEL_TGT_COMPOSE) { - icd->user_width = s->r.width; - icd->user_height = s->r.height; - if (!icd->streamer) - icd->streamer = file; - } - - return ret; -} - -static int soc_camera_g_parm(struct file *file, void *fh, - struct v4l2_streamparm *a) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - if (ici->ops->get_parm) - return ici->ops->get_parm(icd, a); - - return -ENOIOCTLCMD; -} - -static int soc_camera_s_parm(struct file *file, void *fh, - struct v4l2_streamparm *a) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - if (ici->ops->set_parm) - return ici->ops->set_parm(icd, a); - - return -ENOIOCTLCMD; -} - -static int soc_camera_probe(struct soc_camera_host *ici, - struct soc_camera_device *icd); - -/* So far this function cannot fail */ -static void scan_add_host(struct soc_camera_host *ici) -{ - struct soc_camera_device *icd; - - mutex_lock(&list_lock); - - list_for_each_entry(icd, &devices, list) - if (icd->iface == ici->nr) { - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc; - - /* The camera could have been already on, try to reset */ - if (ssdd->reset) - if (icd->control) - ssdd->reset(icd->control); - - icd->parent = ici->v4l2_dev.dev; - - /* Ignore errors */ - soc_camera_probe(ici, icd); - } - - mutex_unlock(&list_lock); -} - -/* - * It is invalid to call v4l2_clk_enable() after a successful probing - * asynchronously outside of V4L2 operations, i.e. with .host_lock not held. - */ -static int soc_camera_clk_enable(struct v4l2_clk *clk) -{ - struct soc_camera_device *icd = clk->priv; - struct soc_camera_host *ici; - - if (!icd || !icd->parent) - return -ENODEV; - - ici = to_soc_camera_host(icd->parent); - - if (!try_module_get(ici->ops->owner)) - return -ENODEV; - - /* - * If a different client is currently being probed, the host will tell - * you to go - */ - return soc_camera_clock_start(ici); -} - -static void soc_camera_clk_disable(struct v4l2_clk *clk) -{ - struct soc_camera_device *icd = clk->priv; - struct soc_camera_host *ici; - - if (!icd || !icd->parent) - return; - - ici = to_soc_camera_host(icd->parent); - - soc_camera_clock_stop(ici); - - module_put(ici->ops->owner); -} - -/* - * Eventually, it would be more logical to make the respective host the clock - * owner, but then we would have to copy this struct for each ici. Besides, it - * would introduce the circular dependency problem, unless we port all client - * drivers to release the clock, when not in use. 
- */ -static const struct v4l2_clk_ops soc_camera_clk_ops = { - .owner = THIS_MODULE, - .enable = soc_camera_clk_enable, - .disable = soc_camera_clk_disable, -}; - -static int soc_camera_dyn_pdev(struct soc_camera_desc *sdesc, - struct soc_camera_async_client *sasc) -{ - struct platform_device *pdev; - int ret, i; - - mutex_lock(&list_lock); - i = find_first_zero_bit(device_map, MAP_MAX_NUM); - if (i < MAP_MAX_NUM) - set_bit(i, device_map); - mutex_unlock(&list_lock); - if (i >= MAP_MAX_NUM) - return -ENOMEM; - - pdev = platform_device_alloc("soc-camera-pdrv", i); - if (!pdev) - return -ENOMEM; - - ret = platform_device_add_data(pdev, sdesc, sizeof(*sdesc)); - if (ret < 0) { - platform_device_put(pdev); - return ret; - } - - sasc->pdev = pdev; - - return 0; -} - -static struct soc_camera_device *soc_camera_add_pdev(struct soc_camera_async_client *sasc) -{ - struct platform_device *pdev = sasc->pdev; - int ret; - - ret = platform_device_add(pdev); - if (ret < 0 || !pdev->dev.driver) - return NULL; - - return platform_get_drvdata(pdev); -} - -/* Locking: called with .host_lock held */ -static int soc_camera_probe_finish(struct soc_camera_device *icd) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_format fmt = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - struct v4l2_mbus_framefmt *mf = &fmt.format; - int ret; - - sd->grp_id = soc_camera_grp_id(icd); - v4l2_set_subdev_hostdata(sd, icd); - - v4l2_subdev_call(sd, video, g_tvnorms, &icd->vdev->tvnorms); - - ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, - NULL, true); - if (ret < 0) - return ret; - - ret = soc_camera_add_device(icd); - if (ret < 0) { - dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret); - return ret; - } - - /* At this point client .probe() should have run already */ - ret = soc_camera_init_user_formats(icd); - if (ret < 0) - goto eusrfmt; - - icd->field = V4L2_FIELD_ANY; - - ret = soc_camera_video_start(icd); - if (ret < 0) - goto evidstart; - - /* Try to improve our guess of a reasonable window format */ - if (!v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt)) { - icd->user_width = mf->width; - icd->user_height = mf->height; - icd->colorspace = mf->colorspace; - icd->field = mf->field; - } - soc_camera_remove_device(icd); - - return 0; - -evidstart: - soc_camera_free_user_formats(icd); -eusrfmt: - soc_camera_remove_device(icd); - - return ret; -} - -#ifdef CONFIG_I2C_BOARDINFO -static int soc_camera_i2c_init(struct soc_camera_device *icd, - struct soc_camera_desc *sdesc) -{ - struct soc_camera_subdev_desc *ssdd; - struct i2c_client *client; - struct soc_camera_host *ici; - struct soc_camera_host_desc *shd = &sdesc->host_desc; - struct i2c_adapter *adap; - struct v4l2_subdev *subdev; - char clk_name[V4L2_CLK_NAME_SIZE]; - int ret; - - /* First find out how we link the main client */ - if (icd->sasc) { - /* Async non-OF probing handled by the subdevice list */ - return -EPROBE_DEFER; - } - - ici = to_soc_camera_host(icd->parent); - adap = i2c_get_adapter(shd->i2c_adapter_id); - if (!adap) { - dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n", - shd->i2c_adapter_id); - return -ENODEV; - } - - ssdd = kmemdup(&sdesc->subdev_desc, sizeof(*ssdd), GFP_KERNEL); - if (!ssdd) { - ret = -ENOMEM; - goto ealloc; - } - /* - * In synchronous case we request regulators ourselves in - * soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try - * to allocate them again. 
- */ - ssdd->sd_pdata.num_regulators = 0; - ssdd->sd_pdata.regulators = NULL; - shd->board_info->platform_data = ssdd; - - v4l2_clk_name_i2c(clk_name, sizeof(clk_name), - shd->i2c_adapter_id, shd->board_info->addr); - - icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); - if (IS_ERR(icd->clk)) { - ret = PTR_ERR(icd->clk); - goto eclkreg; - } - - subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap, - shd->board_info, NULL); - if (!subdev) { - ret = -ENODEV; - goto ei2cnd; - } - - client = v4l2_get_subdevdata(subdev); - - /* Use to_i2c_client(dev) to recover the i2c client */ - icd->control = &client->dev; - - return 0; -ei2cnd: - v4l2_clk_unregister(icd->clk); - icd->clk = NULL; -eclkreg: - kfree(ssdd); -ealloc: - i2c_put_adapter(adap); - return ret; -} - -static void soc_camera_i2c_free(struct soc_camera_device *icd) -{ - struct i2c_client *client = - to_i2c_client(to_soc_camera_control(icd)); - struct i2c_adapter *adap; - struct soc_camera_subdev_desc *ssdd; - - icd->control = NULL; - if (icd->sasc) - return; - - adap = client->adapter; - ssdd = client->dev.platform_data; - v4l2_device_unregister_subdev(i2c_get_clientdata(client)); - i2c_unregister_device(client); - i2c_put_adapter(adap); - kfree(ssdd); - v4l2_clk_unregister(icd->clk); - icd->clk = NULL; -} - -/* - * V4L2 asynchronous notifier callbacks. They are all called under a v4l2-async - * internal global mutex, therefore cannot race against other asynchronous - * events. Until notifier->complete() (soc_camera_async_complete()) is called, - * the video device node is not registered and no V4L fops can occur. Unloading - * of the host driver also calls a v4l2-async function, so also there we're - * protected. - */ -static int soc_camera_async_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, - struct v4l2_async_subdev *asd) -{ - struct soc_camera_async_client *sasc = container_of(notifier, - struct soc_camera_async_client, notifier); - struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev); - - if (asd == sasc->sensor && !WARN_ON(icd->control)) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - - /* - * Only now we get subdevice-specific information like - * regulators, flags, callbacks, etc. 
- */ - if (client) { - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - struct soc_camera_subdev_desc *ssdd = - soc_camera_i2c_to_desc(client); - if (ssdd) { - memcpy(&sdesc->subdev_desc, ssdd, - sizeof(sdesc->subdev_desc)); - if (ssdd->reset) - ssdd->reset(&client->dev); - } - - icd->control = &client->dev; - } - } - - return 0; -} - -static void soc_camera_async_unbind(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, - struct v4l2_async_subdev *asd) -{ - struct soc_camera_async_client *sasc = container_of(notifier, - struct soc_camera_async_client, notifier); - struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev); - - icd->control = NULL; - - if (icd->clk) { - v4l2_clk_unregister(icd->clk); - icd->clk = NULL; - } -} - -static int soc_camera_async_complete(struct v4l2_async_notifier *notifier) -{ - struct soc_camera_async_client *sasc = container_of(notifier, - struct soc_camera_async_client, notifier); - struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev); - - if (to_soc_camera_control(icd)) { - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - int ret; - - mutex_lock(&list_lock); - ret = soc_camera_probe(ici, icd); - mutex_unlock(&list_lock); - if (ret < 0) - return ret; - } - - return 0; -} - -static const struct v4l2_async_notifier_operations soc_camera_async_ops = { - .bound = soc_camera_async_bound, - .unbind = soc_camera_async_unbind, - .complete = soc_camera_async_complete, -}; - -static int scan_async_group(struct soc_camera_host *ici, - struct v4l2_async_subdev **asd, unsigned int size) -{ - struct soc_camera_async_subdev *sasd; - struct soc_camera_async_client *sasc; - struct soc_camera_device *icd; - struct soc_camera_desc sdesc = {.host_desc.bus_id = ici->nr,}; - char clk_name[V4L2_CLK_NAME_SIZE]; - unsigned int i; - int ret; - - /* First look for a sensor */ - for (i = 0; i < size; i++) { - sasd = container_of(asd[i], struct soc_camera_async_subdev, asd); - if (sasd->role == SOCAM_SUBDEV_DATA_SOURCE) - break; - } - - if (i >= size || asd[i]->match_type != V4L2_ASYNC_MATCH_I2C) { - /* All useless */ - dev_err(ici->v4l2_dev.dev, "No I2C data source found!\n"); - return -ENODEV; - } - - /* Or shall this be managed by the soc-camera device? 
*/ - sasc = devm_kzalloc(ici->v4l2_dev.dev, sizeof(*sasc), GFP_KERNEL); - if (!sasc) - return -ENOMEM; - - /* HACK: just need a != NULL */ - sdesc.host_desc.board_info = ERR_PTR(-ENODATA); - - ret = soc_camera_dyn_pdev(&sdesc, sasc); - if (ret < 0) - goto eallocpdev; - - sasc->sensor = &sasd->asd; - - icd = soc_camera_add_pdev(sasc); - if (!icd) { - ret = -ENOMEM; - goto eaddpdev; - } - - v4l2_async_notifier_init(&sasc->notifier); - - for (i = 0; i < size; i++) { - ret = v4l2_async_notifier_add_subdev(&sasc->notifier, asd[i]); - if (ret) - goto eaddasd; - } - - sasc->notifier.ops = &soc_camera_async_ops; - - icd->sasc = sasc; - icd->parent = ici->v4l2_dev.dev; - - v4l2_clk_name_i2c(clk_name, sizeof(clk_name), - sasd->asd.match.i2c.adapter_id, - sasd->asd.match.i2c.address); - - icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); - if (IS_ERR(icd->clk)) { - ret = PTR_ERR(icd->clk); - goto eclkreg; - } - - ret = v4l2_async_notifier_register(&ici->v4l2_dev, &sasc->notifier); - if (!ret) - return 0; - - v4l2_clk_unregister(icd->clk); -eclkreg: - icd->clk = NULL; -eaddasd: - v4l2_async_notifier_cleanup(&sasc->notifier); - platform_device_del(sasc->pdev); -eaddpdev: - platform_device_put(sasc->pdev); -eallocpdev: - devm_kfree(ici->v4l2_dev.dev, sasc); - dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret); - - return ret; -} - -static void scan_async_host(struct soc_camera_host *ici) -{ - struct v4l2_async_subdev **asd; - int j; - - for (j = 0, asd = ici->asd; ici->asd_sizes[j]; j++) { - scan_async_group(ici, asd, ici->asd_sizes[j]); - asd += ici->asd_sizes[j]; - } -} -#else -#define soc_camera_i2c_init(icd, sdesc) (-ENODEV) -#define soc_camera_i2c_free(icd) do {} while (0) -#define scan_async_host(ici) do {} while (0) -#endif - -#ifdef CONFIG_OF - -struct soc_of_info { - struct soc_camera_async_subdev sasd; - struct soc_camera_async_client sasc; - struct v4l2_async_subdev *subdev; -}; - -static int soc_of_bind(struct soc_camera_host *ici, - struct device_node *ep, - struct device_node *remote) -{ - struct soc_camera_device *icd; - struct soc_camera_desc sdesc = {.host_desc.bus_id = ici->nr,}; - struct soc_camera_async_client *sasc; - struct soc_of_info *info; - struct i2c_client *client; - char clk_name[V4L2_CLK_NAME_SIZE]; - int ret; - - /* allocate a new subdev and add match info to it */ - info = devm_kzalloc(ici->v4l2_dev.dev, sizeof(struct soc_of_info), - GFP_KERNEL); - if (!info) - return -ENOMEM; - - info->sasd.asd.match.fwnode = of_fwnode_handle(remote); - info->sasd.asd.match_type = V4L2_ASYNC_MATCH_FWNODE; - info->subdev = &info->sasd.asd; - - /* Or shall this be managed by the soc-camera device? 
*/ - sasc = &info->sasc; - - /* HACK: just need a != NULL */ - sdesc.host_desc.board_info = ERR_PTR(-ENODATA); - - ret = soc_camera_dyn_pdev(&sdesc, sasc); - if (ret < 0) - goto eallocpdev; - - sasc->sensor = &info->sasd.asd; - - icd = soc_camera_add_pdev(sasc); - if (!icd) { - ret = -ENOMEM; - goto eaddpdev; - } - - v4l2_async_notifier_init(&sasc->notifier); - - ret = v4l2_async_notifier_add_subdev(&sasc->notifier, info->subdev); - if (ret) { - of_node_put(remote); - goto eaddasd; - } - - sasc->notifier.ops = &soc_camera_async_ops; - - icd->sasc = sasc; - icd->parent = ici->v4l2_dev.dev; - - client = of_find_i2c_device_by_node(remote); - - if (client) - v4l2_clk_name_i2c(clk_name, sizeof(clk_name), - client->adapter->nr, client->addr); - else - v4l2_clk_name_of(clk_name, sizeof(clk_name), remote); - - icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); - if (IS_ERR(icd->clk)) { - ret = PTR_ERR(icd->clk); - goto eclkreg; - } - - ret = v4l2_async_notifier_register(&ici->v4l2_dev, &sasc->notifier); - if (!ret) - return 0; - - v4l2_clk_unregister(icd->clk); -eclkreg: - icd->clk = NULL; -eaddasd: - v4l2_async_notifier_cleanup(&sasc->notifier); - platform_device_del(sasc->pdev); -eaddpdev: - platform_device_put(sasc->pdev); -eallocpdev: - devm_kfree(ici->v4l2_dev.dev, info); - dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret); - - return ret; -} - -static void scan_of_host(struct soc_camera_host *ici) -{ - struct device *dev = ici->v4l2_dev.dev; - struct device_node *np = dev->of_node; - struct device_node *epn = NULL, *rem; - unsigned int i; - - for (i = 0; ; i++) { - epn = of_graph_get_next_endpoint(np, epn); - if (!epn) - break; - - rem = of_graph_get_remote_port_parent(epn); - if (!rem) { - dev_notice(dev, "no remote for %pOF\n", epn); - continue; - } - - /* so we now have a remote node to connect */ - if (!i) - soc_of_bind(ici, epn, rem); - - if (i) { - dev_err(dev, "multiple subdevices aren't supported yet!\n"); - break; - } - } - - of_node_put(epn); -} - -#else -static inline void scan_of_host(struct soc_camera_host *ici) { } -#endif - -/* Called during host-driver probe */ -static int soc_camera_probe(struct soc_camera_host *ici, - struct soc_camera_device *icd) -{ - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - struct soc_camera_host_desc *shd = &sdesc->host_desc; - struct device *control = NULL; - int ret; - - dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev)); - - /* - * Currently the subdev with the largest number of controls (13) is - * ov6550. So let's pick 16 as a hint for the control handler. Note - * that this is a hint only: too large and you waste some memory, too - * small and there is a (very) small performance hit when looking up - * controls in the internal hash. - */ - ret = v4l2_ctrl_handler_init(&icd->ctrl_handler, 16); - if (ret < 0) - return ret; - - /* Must have icd->vdev before registering the device */ - ret = video_dev_create(icd); - if (ret < 0) - goto evdc; - - /* - * ..._video_start() will create a device node, video_register_device() - * itself is protected against concurrent open() calls, but we also have - * to protect our data also during client probing. 
- */ - - /* Non-i2c cameras, e.g., soc_camera_platform, have no board_info */ - if (shd->board_info) { - ret = soc_camera_i2c_init(icd, sdesc); - if (ret < 0 && ret != -EPROBE_DEFER) - goto eadd; - } else if (!shd->add_device || !shd->del_device) { - ret = -EINVAL; - goto eadd; - } else { - ret = soc_camera_clock_start(ici); - if (ret < 0) - goto eadd; - - if (shd->module_name) - ret = request_module(shd->module_name); - - ret = shd->add_device(icd); - if (ret < 0) - goto eadddev; - - /* - * FIXME: this is racy, have to use driver-binding notification, - * when it is available - */ - control = to_soc_camera_control(icd); - if (!control || !control->driver || !dev_get_drvdata(control) || - !try_module_get(control->driver->owner)) { - shd->del_device(icd); - ret = -ENODEV; - goto enodrv; - } - } - - mutex_lock(&ici->host_lock); - ret = soc_camera_probe_finish(icd); - mutex_unlock(&ici->host_lock); - if (ret < 0) - goto efinish; - - return 0; - -efinish: - if (shd->board_info) { - soc_camera_i2c_free(icd); - } else { - shd->del_device(icd); - module_put(control->driver->owner); -enodrv: -eadddev: - soc_camera_clock_stop(ici); - } -eadd: - if (icd->vdev) { - video_device_release(icd->vdev); - icd->vdev = NULL; - } -evdc: - v4l2_ctrl_handler_free(&icd->ctrl_handler); - return ret; -} - -/* - * This is called on device_unregister, which only means we have to disconnect - * from the host, but not remove ourselves from the device list. With - * asynchronous client probing this can also be called without - * soc_camera_probe_finish() having run. Careful with clean up. - */ -static int soc_camera_remove(struct soc_camera_device *icd) -{ - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - struct video_device *vdev = icd->vdev; - - v4l2_ctrl_handler_free(&icd->ctrl_handler); - if (vdev) { - video_unregister_device(vdev); - icd->vdev = NULL; - } - - if (sdesc->host_desc.board_info) { - soc_camera_i2c_free(icd); - } else { - struct device *dev = to_soc_camera_control(icd); - struct device_driver *drv = dev ? 
dev->driver : NULL; - if (drv) { - sdesc->host_desc.del_device(icd); - module_put(drv->owner); - } - } - - if (icd->num_user_formats) - soc_camera_free_user_formats(icd); - - if (icd->clk) { - /* For the synchronous case */ - v4l2_clk_unregister(icd->clk); - icd->clk = NULL; - } - - if (icd->sasc) - platform_device_unregister(icd->sasc->pdev); - - return 0; -} - -static int default_g_selection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = sel->target, - }; - int ret; - - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel); - if (ret) - return ret; - sel->r = sdsel.r; - return 0; -} - -static int default_s_selection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = sel->target, - .flags = sel->flags, - .r = sel->r, - }; - int ret; - - ret = v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel); - if (ret) - return ret; - sel->r = sdsel.r; - return 0; -} - -static int default_g_parm(struct soc_camera_device *icd, - struct v4l2_streamparm *a) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - - return v4l2_g_parm_cap(icd->vdev, sd, a); -} - -static int default_s_parm(struct soc_camera_device *icd, - struct v4l2_streamparm *a) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - - return v4l2_s_parm_cap(icd->vdev, sd, a); -} - -static int default_enum_framesizes(struct soc_camera_device *icd, - struct v4l2_frmsizeenum *fsize) -{ - int ret; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - const struct soc_camera_format_xlate *xlate; - struct v4l2_subdev_frame_size_enum fse = { - .index = fsize->index, - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - - xlate = soc_camera_xlate_by_fourcc(icd, fsize->pixel_format); - if (!xlate) - return -EINVAL; - fse.code = xlate->code; - - ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse); - if (ret < 0) - return ret; - - if (fse.min_width == fse.max_width && - fse.min_height == fse.max_height) { - fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; - fsize->discrete.width = fse.min_width; - fsize->discrete.height = fse.min_height; - return 0; - } - fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; - fsize->stepwise.min_width = fse.min_width; - fsize->stepwise.max_width = fse.max_width; - fsize->stepwise.min_height = fse.min_height; - fsize->stepwise.max_height = fse.max_height; - fsize->stepwise.step_width = 1; - fsize->stepwise.step_height = 1; - return 0; -} - -int soc_camera_host_register(struct soc_camera_host *ici) -{ - struct soc_camera_host *ix; - int ret; - - if (!ici || !ici->ops || - !ici->ops->try_fmt || - !ici->ops->set_fmt || - !ici->ops->set_bus_param || - !ici->ops->querycap || - !ici->ops->init_videobuf2 || - !ici->ops->poll || - !ici->v4l2_dev.dev) - return -EINVAL; - - if (!ici->ops->set_selection) - ici->ops->set_selection = default_s_selection; - if (!ici->ops->get_selection) - ici->ops->get_selection = default_g_selection; - if (!ici->ops->set_parm) - ici->ops->set_parm = default_s_parm; - if (!ici->ops->get_parm) - ici->ops->get_parm = default_g_parm; - if (!ici->ops->enum_framesizes) - ici->ops->enum_framesizes = default_enum_framesizes; - - mutex_lock(&list_lock); - list_for_each_entry(ix, &hosts, list) { - if (ix->nr == ici->nr) { - ret = -EBUSY; - goto edevreg; - } - } - - ret = 
v4l2_device_register(ici->v4l2_dev.dev, &ici->v4l2_dev); - if (ret < 0) - goto edevreg; - - list_add_tail(&ici->list, &hosts); - mutex_unlock(&list_lock); - - mutex_init(&ici->host_lock); - mutex_init(&ici->clk_lock); - - if (ici->v4l2_dev.dev->of_node) - scan_of_host(ici); - else if (ici->asd_sizes) - /* - * No OF, host with a list of subdevices. Don't try to mix - * modes by initialising some groups statically and some - * dynamically! - */ - scan_async_host(ici); - else - /* Legacy: static platform devices from board data */ - scan_add_host(ici); - - return 0; - -edevreg: - mutex_unlock(&list_lock); - return ret; -} -EXPORT_SYMBOL(soc_camera_host_register); - -/* Unregister all clients! */ -void soc_camera_host_unregister(struct soc_camera_host *ici) -{ - struct soc_camera_device *icd, *tmp; - struct soc_camera_async_client *sasc; - LIST_HEAD(notifiers); - - mutex_lock(&list_lock); - list_del(&ici->list); - list_for_each_entry(icd, &devices, list) - if (icd->iface == ici->nr && icd->sasc) { - /* as long as we hold the device, sasc won't be freed */ - get_device(icd->pdev); - list_add(&icd->sasc->list, ¬ifiers); - } - mutex_unlock(&list_lock); - - list_for_each_entry(sasc, ¬ifiers, list) { - /* Must call unlocked to avoid AB-BA dead-lock */ - v4l2_async_notifier_unregister(&sasc->notifier); - v4l2_async_notifier_cleanup(&sasc->notifier); - put_device(&sasc->pdev->dev); - } - - mutex_lock(&list_lock); - - list_for_each_entry_safe(icd, tmp, &devices, list) - if (icd->iface == ici->nr) - soc_camera_remove(icd); - - mutex_unlock(&list_lock); - - v4l2_device_unregister(&ici->v4l2_dev); -} -EXPORT_SYMBOL(soc_camera_host_unregister); - -/* Image capture device */ -static int soc_camera_device_register(struct soc_camera_device *icd) -{ - struct soc_camera_device *ix; - int num = -1, i; - - mutex_lock(&list_lock); - for (i = 0; i < 256 && num < 0; i++) { - num = i; - /* Check if this index is available on this interface */ - list_for_each_entry(ix, &devices, list) { - if (ix->iface == icd->iface && ix->devnum == i) { - num = -1; - break; - } - } - } - - if (num < 0) { - /* - * ok, we have 256 cameras on this host... - * man, stay reasonable... 
- */ - mutex_unlock(&list_lock); - return -ENOMEM; - } - - icd->devnum = num; - icd->use_count = 0; - icd->host_priv = NULL; - - /* - * Dynamically allocated devices set the bit earlier, but it doesn't hurt setting - * it again - */ - i = to_platform_device(icd->pdev)->id; - if (i < 0) - /* One static (legacy) soc-camera platform device */ - i = 0; - if (i >= MAP_MAX_NUM) { - mutex_unlock(&list_lock); - return -EBUSY; - } - set_bit(i, device_map); - list_add_tail(&icd->list, &devices); - mutex_unlock(&list_lock); - - return 0; -} - -static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = { - .vidioc_querycap = soc_camera_querycap, - .vidioc_try_fmt_vid_cap = soc_camera_try_fmt_vid_cap, - .vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap, - .vidioc_s_fmt_vid_cap = soc_camera_s_fmt_vid_cap, - .vidioc_enum_fmt_vid_cap = soc_camera_enum_fmt_vid_cap, - .vidioc_enum_input = soc_camera_enum_input, - .vidioc_g_input = soc_camera_g_input, - .vidioc_s_input = soc_camera_s_input, - .vidioc_s_std = soc_camera_s_std, - .vidioc_g_std = soc_camera_g_std, - .vidioc_enum_framesizes = soc_camera_enum_framesizes, - .vidioc_reqbufs = soc_camera_reqbufs, - .vidioc_querybuf = soc_camera_querybuf, - .vidioc_qbuf = soc_camera_qbuf, - .vidioc_dqbuf = soc_camera_dqbuf, - .vidioc_create_bufs = soc_camera_create_bufs, - .vidioc_prepare_buf = soc_camera_prepare_buf, - .vidioc_expbuf = soc_camera_expbuf, - .vidioc_streamon = soc_camera_streamon, - .vidioc_streamoff = soc_camera_streamoff, - .vidioc_g_selection = soc_camera_g_selection, - .vidioc_s_selection = soc_camera_s_selection, - .vidioc_g_parm = soc_camera_g_parm, - .vidioc_s_parm = soc_camera_s_parm, -}; - -static int video_dev_create(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct video_device *vdev = video_device_alloc(); - - if (!vdev) - return -ENOMEM; - - strscpy(vdev->name, ici->drv_name, sizeof(vdev->name)); - - vdev->v4l2_dev = &ici->v4l2_dev; - vdev->fops = &soc_camera_fops; - vdev->ioctl_ops = &soc_camera_ioctl_ops; - vdev->release = video_device_release; - vdev->ctrl_handler = &icd->ctrl_handler; - vdev->lock = &ici->host_lock; - - icd->vdev = vdev; - - return 0; -} - -/* - * Called from soc_camera_probe() above with .host_lock held - */ -static int soc_camera_video_start(struct soc_camera_device *icd) -{ - const struct device_type *type = icd->vdev->dev.type; - int ret; - - if (!icd->parent) - return -ENODEV; - - video_set_drvdata(icd->vdev, icd); - if (icd->vdev->tvnorms == 0) { - /* disable the STD API if there are no tvnorms defined */ - v4l2_disable_ioctl(icd->vdev, VIDIOC_G_STD); - v4l2_disable_ioctl(icd->vdev, VIDIOC_S_STD); - v4l2_disable_ioctl(icd->vdev, VIDIOC_ENUMSTD); - } - ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1); - if (ret < 0) { - dev_err(icd->pdev, "video_register_device failed: %d\n", ret); - return ret; - } - - /* Restore device type, possibly set by the subdevice driver */ - icd->vdev->dev.type = type; - - return 0; -} - -static int soc_camera_pdrv_probe(struct platform_device *pdev) -{ - struct soc_camera_desc *sdesc = pdev->dev.platform_data; - struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc; - struct soc_camera_device *icd; - int ret; - - if (!sdesc) - return -EINVAL; - - icd = devm_kzalloc(&pdev->dev, sizeof(*icd), GFP_KERNEL); - if (!icd) - return -ENOMEM; - - /* - * In the asynchronous case ssdd->num_regulators == 0 yet, so, the below - * regulator allocation is a dummy. 
They are actually requested by the - * subdevice driver, using soc_camera_power_init(). Also note, that in - * that case regulators are attached to the I2C device and not to the - * camera platform device. - */ - ret = devm_regulator_bulk_get(&pdev->dev, ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); - if (ret < 0) - return ret; - - icd->iface = sdesc->host_desc.bus_id; - icd->sdesc = sdesc; - icd->pdev = &pdev->dev; - platform_set_drvdata(pdev, icd); - - icd->user_width = DEFAULT_WIDTH; - icd->user_height = DEFAULT_HEIGHT; - - return soc_camera_device_register(icd); -} - -/* - * Only called on rmmod for each platform device, since they are not - * hot-pluggable. Now we know, that all our users - hosts and devices have - * been unloaded already - */ -static int soc_camera_pdrv_remove(struct platform_device *pdev) -{ - struct soc_camera_device *icd = platform_get_drvdata(pdev); - int i; - - if (!icd) - return -EINVAL; - - i = pdev->id; - if (i < 0) - i = 0; - - /* - * In synchronous mode with static platform devices this is called in a - * loop from drivers/base/dd.c::driver_detach(), no parallel execution, - * no need to lock. In asynchronous case the caller - - * soc_camera_host_unregister() - already holds the lock - */ - if (test_bit(i, device_map)) { - clear_bit(i, device_map); - list_del(&icd->list); - } - - return 0; -} - -static struct platform_driver __refdata soc_camera_pdrv = { - .probe = soc_camera_pdrv_probe, - .remove = soc_camera_pdrv_remove, - .driver = { - .name = "soc-camera-pdrv", - }, -}; - -module_platform_driver(soc_camera_pdrv); - -MODULE_DESCRIPTION("Image capture bus driver"); -MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:soc-camera-pdrv"); diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c deleted file mode 100644 index 79fbe1fea95f..000000000000 --- a/drivers/media/platform/soc_camera/soc_camera_platform.c +++ /dev/null @@ -1,188 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Generic Platform Camera Driver - * - * Copyright (C) 2008 Magnus Damm - * Based on mt9m001 driver, - * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/platform_device.h> -#include <linux/videodev2.h> -#include <media/v4l2-subdev.h> -#include <media/soc_camera.h> -#include <linux/platform_data/media/soc_camera_platform.h> - -struct soc_camera_platform_priv { - struct v4l2_subdev subdev; -}; - -static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev) -{ - struct v4l2_subdev *subdev = platform_get_drvdata(pdev); - return container_of(subdev, struct soc_camera_platform_priv, subdev); -} - -static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - return p->set_capture(p, enable); -} - -static int soc_camera_platform_fill_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - struct v4l2_mbus_framefmt *mf = &format->format; - - mf->width = p->format.width; - mf->height = p->format.height; - mf->code = p->format.code; - mf->colorspace = p->format.colorspace; - mf->field = p->format.field; - - return 0; -} - -static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int 
on) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - - return soc_camera_set_power(p->icd->control, &p->icd->sdesc->subdev_desc, NULL, on); -} - -static const struct v4l2_subdev_core_ops platform_subdev_core_ops = { - .s_power = soc_camera_platform_s_power, -}; - -static int soc_camera_platform_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - - if (code->pad || code->index) - return -EINVAL; - - code->code = p->format.code; - return 0; -} - -static int soc_camera_platform_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - case V4L2_SEL_TGT_CROP_DEFAULT: - case V4L2_SEL_TGT_CROP: - sel->r.left = 0; - sel->r.top = 0; - sel->r.width = p->format.width; - sel->r.height = p->format.height; - return 0; - default: - return -EINVAL; - } -} - -static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - - cfg->flags = p->mbus_param; - cfg->type = p->mbus_type; - - return 0; -} - -static const struct v4l2_subdev_video_ops platform_subdev_video_ops = { - .s_stream = soc_camera_platform_s_stream, - .g_mbus_config = soc_camera_platform_g_mbus_config, -}; - -static const struct v4l2_subdev_pad_ops platform_subdev_pad_ops = { - .enum_mbus_code = soc_camera_platform_enum_mbus_code, - .get_selection = soc_camera_platform_get_selection, - .get_fmt = soc_camera_platform_fill_fmt, - .set_fmt = soc_camera_platform_fill_fmt, -}; - -static const struct v4l2_subdev_ops platform_subdev_ops = { - .core = &platform_subdev_core_ops, - .video = &platform_subdev_video_ops, - .pad = &platform_subdev_pad_ops, -}; - -static int soc_camera_platform_probe(struct platform_device *pdev) -{ - struct soc_camera_host *ici; - struct soc_camera_platform_priv *priv; - struct soc_camera_platform_info *p = pdev->dev.platform_data; - struct soc_camera_device *icd; - - if (!p) - return -EINVAL; - - if (!p->icd) { - dev_err(&pdev->dev, - "Platform has not set soc_camera_device pointer!\n"); - return -EINVAL; - } - - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - icd = p->icd; - - /* soc-camera convention: control's drvdata points to the subdev */ - platform_set_drvdata(pdev, &priv->subdev); - /* Set the control device reference */ - icd->control = &pdev->dev; - - ici = to_soc_camera_host(icd->parent); - - v4l2_subdev_init(&priv->subdev, &platform_subdev_ops); - v4l2_set_subdevdata(&priv->subdev, p); - strscpy(priv->subdev.name, dev_name(&pdev->dev), - sizeof(priv->subdev.name)); - - return v4l2_device_register_subdev(&ici->v4l2_dev, &priv->subdev); -} - -static int soc_camera_platform_remove(struct platform_device *pdev) -{ - struct soc_camera_platform_priv *priv = get_priv(pdev); - struct soc_camera_platform_info *p = v4l2_get_subdevdata(&priv->subdev); - - p->icd->control = NULL; - v4l2_device_unregister_subdev(&priv->subdev); - return 0; -} - -static struct platform_driver soc_camera_platform_driver = { - .driver = { - .name = "soc_camera_platform", - }, - .probe = soc_camera_platform_probe, - .remove = soc_camera_platform_remove, -}; - 
-module_platform_driver(soc_camera_platform_driver); - -MODULE_DESCRIPTION("SoC Camera Platform driver"); -MODULE_AUTHOR("Magnus Damm"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:soc_camera_platform"); diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c deleted file mode 100644 index be74008ec0ca..000000000000 --- a/drivers/media/platform/soc_camera/soc_mediabus.c +++ /dev/null @@ -1,533 +0,0 @@ -/* - * soc-camera media bus helper routines - * - * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/kernel.h> -#include <linux/module.h> - -#include <media/v4l2-device.h> -#include <media/v4l2-mediabus.h> -#include <media/drv-intf/soc_mediabus.h> - -static const struct soc_mbus_lookup mbus_fmt[] = { -{ - .code = MEDIA_BUS_FMT_YUYV8_2X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_YUYV, - .name = "YUYV", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YVYU8_2X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_YVYU, - .name = "YVYU", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_UYVY8_2X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_UYVY, - .name = "UYVY", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_VYUY8_2X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_VYUY, - .name = "VYUY", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB555, - .name = "RGB555", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB555X, - .name = "RGB555X", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB565_2X8_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB565, - .name = "RGB565", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB565_2X8_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB565X, - .name = "RGB565X", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB666_1X18, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB32, - .name = "RGB666/32bpp", - .bits_per_sample = 18, - .packing = SOC_MBUS_PACKING_EXTEND32, - .order = SOC_MBUS_ORDER_LE, - }, -}, { - .code = MEDIA_BUS_FMT_RGB888_1X24, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB32, - .name = "RGB888/32bpp", - .bits_per_sample = 24, - .packing = SOC_MBUS_PACKING_EXTEND32, - .order = SOC_MBUS_ORDER_LE, - }, -}, { - .code = MEDIA_BUS_FMT_RGB888_2X12_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB32, - .name = "RGB888/32bpp", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND32, - 
.order = SOC_MBUS_ORDER_BE, - }, -}, { - .code = MEDIA_BUS_FMT_RGB888_2X12_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB32, - .name = "RGB888/32bpp", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND32, - .order = SOC_MBUS_ORDER_LE, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR8_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR8, - .name = "Bayer 8 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_NONE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_Y8_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_GREY, - .name = "Grey", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_NONE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_Y10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_Y10, - .name = "Grey 10bit", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADLO, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADLO, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_JPEG_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_JPEG, - .name = "JPEG", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_VARIABLE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB444, - .name = "RGB444", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YUYV8_1_5X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_YUV420, - .name = "YUYV 4:2:0", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_1_5X8, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YVYU8_1_5X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_YVU420, - .name = "YVYU 4:2:0", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_1_5X8, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_UYVY8_1X16, - .fmt = { - .fourcc = V4L2_PIX_FMT_UYVY, - .name = "UYVY 16bit", - .bits_per_sample = 16, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_VYUY8_1X16, - .fmt = { - .fourcc = V4L2_PIX_FMT_VYUY, - 
.name = "VYUY 16bit", - .bits_per_sample = 16, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YUYV8_1X16, - .fmt = { - .fourcc = V4L2_PIX_FMT_YUYV, - .name = "YUYV 16bit", - .bits_per_sample = 16, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YVYU8_1X16, - .fmt = { - .fourcc = V4L2_PIX_FMT_YVYU, - .name = "YVYU 16bit", - .bits_per_sample = 16, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGRBG8_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGRBG8, - .name = "Bayer 8 GRBG", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_NONE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8, - .name = "Bayer 10 BGGR DPCM 8", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_NONE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGBRG10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGBRG10, - .name = "Bayer 10 GBRG", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGRBG10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGRBG10, - .name = "Bayer 10 GRBG", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SRGGB10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_SRGGB10, - .name = "Bayer 10 RGGB", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR12_1X12, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR12, - .name = "Bayer 12 BGGR", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGBRG12_1X12, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGBRG12, - .name = "Bayer 12 GBRG", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGRBG12_1X12, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGRBG12, - .name = "Bayer 12 GRBG", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SRGGB12_1X12, - .fmt = { - .fourcc = V4L2_PIX_FMT_SRGGB12, - .name = "Bayer 12 RGGB", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, -}; - -int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf, - unsigned int *numerator, unsigned int *denominator) -{ - switch (mf->packing) { - case SOC_MBUS_PACKING_NONE: - case SOC_MBUS_PACKING_EXTEND16: - *numerator = 1; - *denominator = 1; - return 0; - case SOC_MBUS_PACKING_EXTEND32: - *numerator = 1; - *denominator = 1; - return 0; - case SOC_MBUS_PACKING_2X8_PADHI: - case SOC_MBUS_PACKING_2X8_PADLO: - *numerator = 2; - *denominator = 1; - return 0; - case SOC_MBUS_PACKING_1_5X8: - *numerator = 3; - *denominator = 2; - return 0; - case SOC_MBUS_PACKING_VARIABLE: - *numerator = 0; - *denominator = 1; - 
return 0; - } - return -EINVAL; -} -EXPORT_SYMBOL(soc_mbus_samples_per_pixel); - -s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) -{ - if (mf->layout != SOC_MBUS_LAYOUT_PACKED) - return width * mf->bits_per_sample / 8; - - switch (mf->packing) { - case SOC_MBUS_PACKING_NONE: - return width * mf->bits_per_sample / 8; - case SOC_MBUS_PACKING_2X8_PADHI: - case SOC_MBUS_PACKING_2X8_PADLO: - case SOC_MBUS_PACKING_EXTEND16: - return width * 2; - case SOC_MBUS_PACKING_1_5X8: - return width * 3 / 2; - case SOC_MBUS_PACKING_VARIABLE: - return 0; - case SOC_MBUS_PACKING_EXTEND32: - return width * 4; - } - return -EINVAL; -} -EXPORT_SYMBOL(soc_mbus_bytes_per_line); - -s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, - u32 bytes_per_line, u32 height) -{ - if (mf->layout == SOC_MBUS_LAYOUT_PACKED) - return bytes_per_line * height; - - switch (mf->packing) { - case SOC_MBUS_PACKING_2X8_PADHI: - case SOC_MBUS_PACKING_2X8_PADLO: - return bytes_per_line * height * 2; - case SOC_MBUS_PACKING_1_5X8: - return bytes_per_line * height * 3 / 2; - default: - return -EINVAL; - } -} -EXPORT_SYMBOL(soc_mbus_image_size); - -const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc( - u32 code, - const struct soc_mbus_lookup *lookup, - int n) -{ - int i; - - for (i = 0; i < n; i++) - if (lookup[i].code == code) - return &lookup[i].fmt; - - return NULL; -} -EXPORT_SYMBOL(soc_mbus_find_fmtdesc); - -const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( - u32 code) -{ - return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt)); -} -EXPORT_SYMBOL(soc_mbus_get_fmtdesc); - -unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg, - unsigned int flags) -{ - unsigned long common_flags; - bool hsync = true, vsync = true, pclk, data, mode; - bool mipi_lanes, mipi_clock; - - common_flags = cfg->flags & flags; - - switch (cfg->type) { - case V4L2_MBUS_PARALLEL: - hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | - V4L2_MBUS_HSYNC_ACTIVE_LOW); - vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | - V4L2_MBUS_VSYNC_ACTIVE_LOW); - /* fall through */ - case V4L2_MBUS_BT656: - pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | - V4L2_MBUS_PCLK_SAMPLE_FALLING); - data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH | - V4L2_MBUS_DATA_ACTIVE_LOW); - mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE); - return (!hsync || !vsync || !pclk || !data || !mode) ? - 0 : common_flags; - case V4L2_MBUS_CSI2_DPHY: - mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES; - mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK | - V4L2_MBUS_CSI2_CONTINUOUS_CLOCK); - return (!mipi_lanes || !mipi_clock) ? 
0 : common_flags; - default: - WARN_ON(1); - return -EINVAL; - } - return 0; -} -EXPORT_SYMBOL(soc_mbus_config_compatible); - -static int __init soc_mbus_init(void) -{ - return 0; -} - -static void __exit soc_mbus_exit(void) -{ -} - -module_init(soc_mbus_init); -module_exit(soc_mbus_exit); - -MODULE_DESCRIPTION("soc-camera media bus interface"); -MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c deleted file mode 100644 index 8d25ca0490f7..000000000000 --- a/drivers/media/platform/soc_camera/soc_scale_crop.c +++ /dev/null @@ -1,426 +0,0 @@ -/* - * soc-camera generic scaling-cropping manipulation functions - * - * Copyright (C) 2013 Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/device.h> -#include <linux/module.h> - -#include <media/soc_camera.h> -#include <media/v4l2-common.h> - -#include "soc_scale_crop.h" - -#ifdef DEBUG_GEOMETRY -#define dev_geo dev_info -#else -#define dev_geo dev_dbg -#endif - -/* Check if any dimension of r1 is smaller than respective one of r2 */ -static bool is_smaller(const struct v4l2_rect *r1, const struct v4l2_rect *r2) -{ - return r1->width < r2->width || r1->height < r2->height; -} - -/* Check if r1 fails to cover r2 */ -static bool is_inside(const struct v4l2_rect *r1, const struct v4l2_rect *r2) -{ - return r1->left > r2->left || r1->top > r2->top || - r1->left + r1->width < r2->left + r2->width || - r1->top + r1->height < r2->top + r2->height; -} - -/* Get and store current client crop */ -int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect) -{ - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = V4L2_SEL_TGT_CROP, - }; - int ret; - - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel); - if (!ret) { - *rect = sdsel.r; - return ret; - } - - sdsel.target = V4L2_SEL_TGT_CROP_BOUNDS; - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel); - if (!ret) - *rect = sdsel.r; - - return ret; -} -EXPORT_SYMBOL(soc_camera_client_g_rect); - -/* Client crop has changed, update our sub-rectangle to remain within the area */ -static void move_and_crop_subrect(struct v4l2_rect *rect, - struct v4l2_rect *subrect) -{ - if (rect->width < subrect->width) - subrect->width = rect->width; - - if (rect->height < subrect->height) - subrect->height = rect->height; - - if (rect->left > subrect->left) - subrect->left = rect->left; - else if (rect->left + rect->width < - subrect->left + subrect->width) - subrect->left = rect->left + rect->width - - subrect->width; - - if (rect->top > subrect->top) - subrect->top = rect->top; - else if (rect->top + rect->height < - subrect->top + subrect->height) - subrect->top = rect->top + rect->height - - subrect->height; -} - -/* - * The common for both scaling and cropping iterative approach is: - * 1. try if the client can produce exactly what requested by the user - * 2. if (1) failed, try to double the client image until we get one big enough - * 3. 
if (2) failed, try to request the maximum image - */ -int soc_camera_client_s_selection(struct v4l2_subdev *sd, - struct v4l2_selection *sel, struct v4l2_selection *cam_sel, - struct v4l2_rect *target_rect, struct v4l2_rect *subrect) -{ - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = sel->target, - .flags = sel->flags, - .r = sel->r, - }; - struct v4l2_subdev_selection bounds = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = V4L2_SEL_TGT_CROP_BOUNDS, - }; - struct v4l2_rect *rect = &sel->r, *cam_rect = &cam_sel->r; - struct device *dev = sd->v4l2_dev->dev; - int ret; - unsigned int width, height; - - v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel); - sel->r = sdsel.r; - ret = soc_camera_client_g_rect(sd, cam_rect); - if (ret < 0) - return ret; - - /* - * Now cam_crop contains the current camera input rectangle, and it must - * be within camera cropcap bounds - */ - if (!memcmp(rect, cam_rect, sizeof(*rect))) { - /* Even if camera S_SELECTION failed, but camera rectangle matches */ - dev_dbg(dev, "Camera S_SELECTION successful for %dx%d@%d:%d\n", - rect->width, rect->height, rect->left, rect->top); - *target_rect = *cam_rect; - return 0; - } - - /* Try to fix cropping, that camera hasn't managed to set */ - dev_geo(dev, "Fix camera S_SELECTION for %dx%d@%d:%d to %dx%d@%d:%d\n", - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top, - rect->width, rect->height, rect->left, rect->top); - - /* We need sensor maximum rectangle */ - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &bounds); - if (ret < 0) - return ret; - - /* Put user requested rectangle within sensor bounds */ - soc_camera_limit_side(&rect->left, &rect->width, sdsel.r.left, 2, - bounds.r.width); - soc_camera_limit_side(&rect->top, &rect->height, sdsel.r.top, 4, - bounds.r.height); - - /* - * Popular special case - some cameras can only handle fixed sizes like - * QVGA, VGA,... Take care to avoid infinite loop. - */ - width = max_t(unsigned int, cam_rect->width, 2); - height = max_t(unsigned int, cam_rect->height, 2); - - /* - * Loop as long as sensor is not covering the requested rectangle and - * is still within its bounds - */ - while (!ret && (is_smaller(cam_rect, rect) || - is_inside(cam_rect, rect)) && - (bounds.r.width > width || bounds.r.height > height)) { - - width *= 2; - height *= 2; - - cam_rect->width = width; - cam_rect->height = height; - - /* - * We do not know what capabilities the camera has to set up - * left and top borders. We could try to be smarter in iterating - * them, e.g., if camera current left is to the right of the - * target left, set it to the middle point between the current - * left and minimum left. But that would add too much - * complexity: we would have to iterate each border separately. - * Instead we just drop to the left and top bounds. 
- */ - if (cam_rect->left > rect->left) - cam_rect->left = bounds.r.left; - - if (cam_rect->left + cam_rect->width < rect->left + rect->width) - cam_rect->width = rect->left + rect->width - - cam_rect->left; - - if (cam_rect->top > rect->top) - cam_rect->top = bounds.r.top; - - if (cam_rect->top + cam_rect->height < rect->top + rect->height) - cam_rect->height = rect->top + rect->height - - cam_rect->top; - - sdsel.r = *cam_rect; - v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel); - *cam_rect = sdsel.r; - ret = soc_camera_client_g_rect(sd, cam_rect); - dev_geo(dev, "Camera S_SELECTION %d for %dx%d@%d:%d\n", ret, - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top); - } - - /* S_SELECTION must not modify the rectangle */ - if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) { - /* - * The camera failed to configure a suitable cropping, - * we cannot use the current rectangle, set to max - */ - sdsel.r = bounds.r; - v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel); - *cam_rect = sdsel.r; - - ret = soc_camera_client_g_rect(sd, cam_rect); - dev_geo(dev, "Camera S_SELECTION %d for max %dx%d@%d:%d\n", ret, - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top); - } - - if (!ret) { - *target_rect = *cam_rect; - move_and_crop_subrect(target_rect, subrect); - } - - return ret; -} -EXPORT_SYMBOL(soc_camera_client_s_selection); - -/* Iterative set_fmt, also updates cached client crop on success */ -static int client_set_fmt(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - unsigned int max_width, unsigned int max_height, - struct v4l2_subdev_format *format, bool host_can_scale) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct device *dev = icd->parent; - struct v4l2_mbus_framefmt *mf = &format->format; - unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h; - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = V4L2_SEL_TGT_CROP_BOUNDS, - }; - bool host_1to1; - int ret; - - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, NULL, format); - if (ret < 0) - return ret; - - dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height); - - if (width == mf->width && height == mf->height) { - /* Perfect! The client has done it all. 
*/ - host_1to1 = true; - goto update_cache; - } - - host_1to1 = false; - if (!host_can_scale) - goto update_cache; - - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel); - if (ret < 0) - return ret; - - if (max_width > sdsel.r.width) - max_width = sdsel.r.width; - if (max_height > sdsel.r.height) - max_height = sdsel.r.height; - - /* Camera set a format, but geometry is not precise, try to improve */ - tmp_w = mf->width; - tmp_h = mf->height; - - /* width <= max_width && height <= max_height - guaranteed by try_fmt */ - while ((width > tmp_w || height > tmp_h) && - tmp_w < max_width && tmp_h < max_height) { - tmp_w = min(2 * tmp_w, max_width); - tmp_h = min(2 * tmp_h, max_height); - mf->width = tmp_w; - mf->height = tmp_h; - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, NULL, format); - dev_geo(dev, "Camera scaled to %ux%u\n", - mf->width, mf->height); - if (ret < 0) { - /* This shouldn't happen */ - dev_err(dev, "Client failed to set format: %d\n", ret); - return ret; - } - } - -update_cache: - /* Update cache */ - ret = soc_camera_client_g_rect(sd, rect); - if (ret < 0) - return ret; - - if (host_1to1) - *subrect = *rect; - else - move_and_crop_subrect(rect, subrect); - - return 0; -} - -/** - * soc_camera_client_scale - * @icd: soc-camera device - * @rect: camera cropping window - * @subrect: part of rect, sent to the user - * @mf: in- / output camera output window - * @width: on input: max host input width; - * on output: user width, mapped back to input - * @height: on input: max host input height; - * on output: user height, mapped back to input - * @host_can_scale: host can scale this pixel format - * @shift: shift, used for scaling - */ -int soc_camera_client_scale(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - struct v4l2_mbus_framefmt *mf, - unsigned int *width, unsigned int *height, - bool host_can_scale, unsigned int shift) -{ - struct device *dev = icd->parent; - struct v4l2_subdev_format fmt_tmp = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .format = *mf, - }; - struct v4l2_mbus_framefmt *mf_tmp = &fmt_tmp.format; - unsigned int scale_h, scale_v; - int ret; - - /* - * 5. Apply iterative camera S_FMT for camera user window (also updates - * client crop cache and the imaginary sub-rectangle). - */ - ret = client_set_fmt(icd, rect, subrect, *width, *height, - &fmt_tmp, host_can_scale); - if (ret < 0) - return ret; - - dev_geo(dev, "5: camera scaled to %ux%u\n", - mf_tmp->width, mf_tmp->height); - - /* 6. Retrieve camera output window (g_fmt) */ - - /* unneeded - it is already in "mf_tmp" */ - - /* 7. Calculate new client scales. */ - scale_h = soc_camera_calc_scale(rect->width, shift, mf_tmp->width); - scale_v = soc_camera_calc_scale(rect->height, shift, mf_tmp->height); - - mf->width = mf_tmp->width; - mf->height = mf_tmp->height; - mf->colorspace = mf_tmp->colorspace; - - /* - * 8. Calculate new host crop - apply camera scales to previously - * updated "effective" crop. - */ - *width = soc_camera_shift_scale(subrect->width, shift, scale_h); - *height = soc_camera_shift_scale(subrect->height, shift, scale_v); - - dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height); - - return 0; -} -EXPORT_SYMBOL(soc_camera_client_scale); - -/* - * Calculate real client output window by applying new scales to the current - * client crop. New scales are calculated from the requested output format and - * host crop, mapped backed onto the client input (subrect). 
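/*
 * The scale values used above are plain fixed-point ratios:
 * scale = (in << shift) / out, and a length is mapped back through a
 * scale as (len << shift) / scale (both implemented with
 * DIV_ROUND_CLOSEST() in soc_camera_shift_scale()).  A worked example
 * with an arbitrarily chosen shift of 12: a 1280-pixel-wide crop scaled
 * by the client to a 640-pixel output gives
 *
 *	scale_h = (1280 << 12) / 640 = 8192	(2.0 in Q12)
 *
 * and a 960-pixel-wide sub-rectangle then maps back to
 *
 *	(960 << 12) / 8192 = 480
 *
 * host-input pixels.
 */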
- */ -void soc_camera_calc_client_output(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf, - unsigned int shift) -{ - struct device *dev = icd->parent; - unsigned int scale_v, scale_h; - - if (subrect->width == rect->width && - subrect->height == rect->height) { - /* No sub-cropping */ - mf->width = pix->width; - mf->height = pix->height; - return; - } - - /* 1.-2. Current camera scales and subwin - cached. */ - - dev_geo(dev, "2: subwin %ux%u@%u:%u\n", - subrect->width, subrect->height, - subrect->left, subrect->top); - - /* - * 3. Calculate new combined scales from input sub-window to requested - * user window. - */ - - /* - * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF - * (128x96) or larger than VGA. This and similar limitations have to be - * taken into account here. - */ - scale_h = soc_camera_calc_scale(subrect->width, shift, pix->width); - scale_v = soc_camera_calc_scale(subrect->height, shift, pix->height); - - dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v); - - /* - * 4. Calculate desired client output window by applying combined scales - * to client (real) input window. - */ - mf->width = soc_camera_shift_scale(rect->width, shift, scale_h); - mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); -} -EXPORT_SYMBOL(soc_camera_calc_client_output); - -MODULE_DESCRIPTION("soc-camera scaling-cropping functions"); -MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.h b/drivers/media/platform/soc_camera/soc_scale_crop.h deleted file mode 100644 index 9ca469312a1f..000000000000 --- a/drivers/media/platform/soc_camera/soc_scale_crop.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * soc-camera generic scaling-cropping manipulation functions - * - * Copyright (C) 2013 Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef SOC_SCALE_CROP_H -#define SOC_SCALE_CROP_H - -#include <linux/kernel.h> - -struct soc_camera_device; - -struct v4l2_selection; -struct v4l2_mbus_framefmt; -struct v4l2_pix_format; -struct v4l2_rect; -struct v4l2_subdev; - -static inline unsigned int soc_camera_shift_scale(unsigned int size, - unsigned int shift, unsigned int scale) -{ - return DIV_ROUND_CLOSEST(size << shift, scale); -} - -#define soc_camera_calc_scale(in, shift, out) soc_camera_shift_scale(in, shift, out) - -int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect); -int soc_camera_client_s_selection(struct v4l2_subdev *sd, - struct v4l2_selection *sel, struct v4l2_selection *cam_sel, - struct v4l2_rect *target_rect, struct v4l2_rect *subrect); -int soc_camera_client_scale(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - struct v4l2_mbus_framefmt *mf, - unsigned int *width, unsigned int *height, - bool host_can_scale, unsigned int shift); -void soc_camera_calc_client_output(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf, - unsigned int shift); - -#endif diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c index c6a4e2de5c0c..77ca7517fa3e 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c @@ -315,7 +315,7 @@ static void bdisp_dbg_dump_ivmx(struct seq_file *s, seq_puts(s, "Unknown conversion\n"); } -static int bdisp_dbg_last_nodes(struct seq_file *s, void *data) +static int last_nodes_show(struct seq_file *s, void *data) { /* Not dumping all fields, focusing on significant ones */ struct bdisp_dev *bdisp = s->private; @@ -388,7 +388,7 @@ static int bdisp_dbg_last_nodes(struct seq_file *s, void *data) return 0; } -static int bdisp_dbg_last_nodes_raw(struct seq_file *s, void *data) +static int last_nodes_raw_show(struct seq_file *s, void *data) { struct bdisp_dev *bdisp = s->private; struct bdisp_node *node; @@ -437,7 +437,7 @@ static const char *bdisp_fmt_to_str(struct bdisp_frame frame) } } -static int bdisp_dbg_last_request(struct seq_file *s, void *data) +static int last_request_show(struct seq_file *s, void *data) { struct bdisp_dev *bdisp = s->private; struct bdisp_request *request = &bdisp->dbg.copy_request; @@ -474,7 +474,7 @@ static int bdisp_dbg_last_request(struct seq_file *s, void *data) #define DUMP(reg) seq_printf(s, #reg " \t0x%08X\n", readl(bdisp->regs + reg)) -static int bdisp_dbg_regs(struct seq_file *s, void *data) +static int regs_show(struct seq_file *s, void *data) { struct bdisp_dev *bdisp = s->private; int ret; @@ -582,7 +582,7 @@ static int bdisp_dbg_regs(struct seq_file *s, void *data) #define SECOND 1000000 -static int bdisp_dbg_perf(struct seq_file *s, void *data) +static int perf_show(struct seq_file *s, void *data) { struct bdisp_dev *bdisp = s->private; struct bdisp_request *request = &bdisp->dbg.copy_request; @@ -627,27 +627,15 @@ static int bdisp_dbg_perf(struct seq_file *s, void *data) return 0; } -#define bdisp_dbg_declare(name) \ - static int bdisp_dbg_##name##_open(struct inode *i, struct file *f) \ - { \ - return single_open(f, bdisp_dbg_##name, i->i_private); \ - } \ - static const struct file_operations bdisp_dbg_##name##_fops = { \ - .open = bdisp_dbg_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ - } - #define bdisp_dbg_create_entry(name) \ 
debugfs_create_file(#name, S_IRUGO, bdisp->dbg.debugfs_entry, bdisp, \ - &bdisp_dbg_##name##_fops) + &name##_fops) -bdisp_dbg_declare(regs); -bdisp_dbg_declare(last_nodes); -bdisp_dbg_declare(last_nodes_raw); -bdisp_dbg_declare(last_request); -bdisp_dbg_declare(perf); +DEFINE_SHOW_ATTRIBUTE(regs); +DEFINE_SHOW_ATTRIBUTE(last_nodes); +DEFINE_SHOW_ATTRIBUTE(last_nodes_raw); +DEFINE_SHOW_ATTRIBUTE(last_request); +DEFINE_SHOW_ATTRIBUTE(perf); int bdisp_debugfs_create(struct bdisp_dev *bdisp) { diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h index 3dbb3a287cc0..c9d6021904cd 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h @@ -193,7 +193,7 @@ struct c8sectpfei { #define C8SECTPFE_SYS_ENABLE BIT(0) /* - * Ponter record data structure required for each input block + * Pointer record data structure required for each input block * see Table 82 on page 167 of functional specification. */ diff --git a/drivers/media/platform/sti/delta/delta.h b/drivers/media/platform/sti/delta/delta.h index 2ba99922c05b..914556030e70 100644 --- a/drivers/media/platform/sti/delta/delta.h +++ b/drivers/media/platform/sti/delta/delta.h @@ -286,7 +286,7 @@ struct delta_dec { * Header parsing must be done using decode(), giving * explicitly header access unit or first access unit of bitstream. * If no valid header is found, get_streaminfo will return -ENODATA, - * in this case the next bistream access unit must be decoded till + * in this case the next bitstream access unit must be decoded till * get_streaminfo becomes successful. */ int (*get_streaminfo)(struct delta_ctx *ctx, diff --git a/drivers/media/platform/sti/hva/hva-debugfs.c b/drivers/media/platform/sti/hva/hva-debugfs.c index 9f7e8ac875d1..7d12a5b5d914 100644 --- a/drivers/media/platform/sti/hva/hva-debugfs.c +++ b/drivers/media/platform/sti/hva/hva-debugfs.c @@ -271,7 +271,7 @@ static void hva_dbg_perf_compute(struct hva_ctx *ctx) * device debug info */ -static int hva_dbg_device(struct seq_file *s, void *data) +static int device_show(struct seq_file *s, void *data) { struct hva_dev *hva = s->private; @@ -281,7 +281,7 @@ static int hva_dbg_device(struct seq_file *s, void *data) return 0; } -static int hva_dbg_encoders(struct seq_file *s, void *data) +static int encoders_show(struct seq_file *s, void *data) { struct hva_dev *hva = s->private; unsigned int i = 0; @@ -299,7 +299,7 @@ static int hva_dbg_encoders(struct seq_file *s, void *data) return 0; } -static int hva_dbg_last(struct seq_file *s, void *data) +static int last_show(struct seq_file *s, void *data) { struct hva_dev *hva = s->private; struct hva_ctx *last_ctx = &hva->dbg.last_ctx; @@ -316,7 +316,7 @@ static int hva_dbg_last(struct seq_file *s, void *data) return 0; } -static int hva_dbg_regs(struct seq_file *s, void *data) +static int regs_show(struct seq_file *s, void *data) { struct hva_dev *hva = s->private; @@ -325,26 +325,14 @@ static int hva_dbg_regs(struct seq_file *s, void *data) return 0; } -#define hva_dbg_declare(name) \ - static int hva_dbg_##name##_open(struct inode *i, struct file *f) \ - { \ - return single_open(f, hva_dbg_##name, i->i_private); \ - } \ - static const struct file_operations hva_dbg_##name##_fops = { \ - .open = hva_dbg_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ - } - #define hva_dbg_create_entry(name) \ debugfs_create_file(#name, 0444, hva->dbg.debugfs_entry, hva, \ - 
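/*
 * DEFINE_SHOW_ATTRIBUTE(name), from <linux/seq_file.h>, generates the
 * single_open() boilerplate that the deleted bdisp_dbg_declare() and
 * hva_dbg_declare() macros hand-rolled.  It expects a name##_show()
 * seq_file callback and emits name##_fops, which is why the show
 * functions are renamed above.  Roughly equivalent sketch (not the
 * verbatim kernel definition):
 */
#define DEMO_DEFINE_SHOW_ATTRIBUTE(__name)				\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}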
&hva_dbg_##name##_fops) + &name##_fops) -hva_dbg_declare(device); -hva_dbg_declare(encoders); -hva_dbg_declare(last); -hva_dbg_declare(regs); +DEFINE_SHOW_ATTRIBUTE(device); +DEFINE_SHOW_ATTRIBUTE(encoders); +DEFINE_SHOW_ATTRIBUTE(last); +DEFINE_SHOW_ATTRIBUTE(regs); void hva_debugfs_create(struct hva_dev *hva) { @@ -380,7 +368,7 @@ void hva_debugfs_remove(struct hva_dev *hva) * context (instance) debug info */ -static int hva_dbg_ctx(struct seq_file *s, void *data) +static int ctx_show(struct seq_file *s, void *data) { struct hva_ctx *ctx = s->private; @@ -392,7 +380,7 @@ static int hva_dbg_ctx(struct seq_file *s, void *data) return 0; } -hva_dbg_declare(ctx); +DEFINE_SHOW_ATTRIBUTE(ctx); void hva_dbg_ctx_create(struct hva_ctx *ctx) { @@ -407,7 +395,7 @@ void hva_dbg_ctx_create(struct hva_ctx *ctx) ctx->dbg.debugfs_entry = debugfs_create_file(name, 0444, hva->dbg.debugfs_entry, - ctx, &hva_dbg_ctx_fops); + ctx, &ctx_fops); } void hva_dbg_ctx_remove(struct hva_ctx *ctx) diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c index b61a5d337d2a..c34f7cf5aed2 100644 --- a/drivers/media/platform/sti/hva/hva-h264.c +++ b/drivers/media/platform/sti/hva/hva-h264.c @@ -626,7 +626,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx, td->frame_width = frame_width; td->frame_height = frame_height; - /* set frame alignement */ + /* set frame alignment */ td->window_width = frame_width; td->window_height = frame_height; td->window_horizontal_offset = 0; diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index 6732874114cf..5fe5b38fa901 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -1544,7 +1544,7 @@ static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier, dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev)); - /* Checks internaly if vdev has been init or not */ + /* Checks internally if vdev has been init or not */ video_unregister_device(dcmi->vdev); } diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c index 6950585edb5a..4c79eb64a7a7 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c @@ -15,6 +15,7 @@ #include <linux/ioctl.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> @@ -143,6 +144,15 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, break; } break; + + case V4L2_PIX_FMT_RGB565: + return (mbus_code == MEDIA_BUS_FMT_RGB565_2X8_LE); + case V4L2_PIX_FMT_RGB565X: + return (mbus_code == MEDIA_BUS_FMT_RGB565_2X8_BE); + + case V4L2_PIX_FMT_JPEG: + return (mbus_code == MEDIA_BUS_FMT_JPEG_1X8); + default: dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat); break; @@ -154,6 +164,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) { struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); + struct device *dev = sdev->dev; struct regmap *regmap = sdev->regmap; int ret; @@ -161,6 +172,9 @@ int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0); clk_disable_unprepare(sdev->clk_ram); + if (of_device_is_compatible(dev->of_node, + "allwinner,sun50i-a64-csi")) + clk_rate_exclusive_put(sdev->clk_mod); 
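/*
 * Sketch of the clock handling added for the A64 variant above: the CSI
 * module clock is pinned to 300 MHz with an exclusive rate while the
 * block is powered, and the exclusivity is dropped again on power-off
 * and in the error path.  Illustrative helpers only, assuming
 * <linux/clk.h> and <linux/of_device.h>:
 */
static int demo_csi_mod_clk_on(struct device *dev, struct clk *clk_mod)
{
	int ret = clk_prepare_enable(clk_mod);

	if (ret)
		return ret;

	if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi"))
		clk_set_rate_exclusive(clk_mod, 300000000);

	return 0;
}

static void demo_csi_mod_clk_off(struct device *dev, struct clk *clk_mod)
{
	if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi"))
		clk_rate_exclusive_put(clk_mod);

	clk_disable_unprepare(clk_mod);
}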
clk_disable_unprepare(sdev->clk_mod); reset_control_assert(sdev->rstc_bus); return 0; @@ -172,6 +186,9 @@ int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) return ret; } + if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi")) + clk_set_rate_exclusive(sdev->clk_mod, 300000000); + ret = clk_prepare_enable(sdev->clk_ram); if (ret) { dev_err(sdev->dev, "Enable clk_dram_csi clk err %d\n", ret); @@ -191,6 +208,8 @@ int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) clk_ram_disable: clk_disable_unprepare(sdev->clk_ram); clk_mod_disable: + if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi")) + clk_rate_exclusive_put(sdev->clk_mod); clk_disable_unprepare(sdev->clk_mod); return ret; } @@ -198,8 +217,8 @@ clk_mod_disable: static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev, u32 mbus_code, u32 pixformat) { - /* bayer */ - if ((mbus_code & 0xF000) == 0x3000) + /* non-YUV */ + if ((mbus_code & 0xF000) != 0x2000) return CSI_INPUT_FORMAT_RAW; switch (pixformat) { @@ -268,6 +287,14 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev, case V4L2_PIX_FMT_YUV422P: return buf_interlaced ? CSI_FRAME_PLANAR_YUV422 : CSI_FIELD_PLANAR_YUV422; + + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + return buf_interlaced ? CSI_FRAME_RGB565 : CSI_FIELD_RGB565; + + case V4L2_PIX_FMT_JPEG: + return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8; + default: dev_warn(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat); break; @@ -279,6 +306,10 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev, static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, u32 mbus_code, u32 pixformat) { + /* Input sequence does not apply to non-YUV formats */ + if ((mbus_code & 0xF000) != 0x2000) + return 0; + switch (pixformat) { case V4L2_PIX_FMT_HM12: case V4L2_PIX_FMT_NV12: @@ -793,7 +824,7 @@ static const struct regmap_config sun6i_csi_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = 0x1000, + .max_register = 0x9c, }; static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev, @@ -893,7 +924,9 @@ static int sun6i_csi_remove(struct platform_device *pdev) static const struct of_device_id sun6i_csi_of_match[] = { { .compatible = "allwinner,sun6i-a31-csi", }, + { .compatible = "allwinner,sun8i-h3-csi", }, { .compatible = "allwinner,sun8i-v3s-csi", }, + { .compatible = "allwinner,sun50i-a64-csi", }, {}, }; MODULE_DEVICE_TABLE(of, sun6i_csi_of_match); diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h index 0bb000712c33..c626821aaedb 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h @@ -65,7 +65,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, u32 pixformat, int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable); /** - * sun6i_csi_update_config() - update the csi register setttings + * sun6i_csi_update_config() - update the csi register settings * @csi: pointer to the csi * @config: see struct sun6i_csi_config */ @@ -94,6 +94,7 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat) case V4L2_PIX_FMT_SGBRG8: case V4L2_PIX_FMT_SGRBG8: case V4L2_PIX_FMT_SRGGB8: + case V4L2_PIX_FMT_JPEG: return 8; case V4L2_PIX_FMT_SBGGR10: case V4L2_PIX_FMT_SGBRG10: @@ -117,6 +118,8 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat) case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: case 
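/*
 * The rewritten check above relies on the MEDIA_BUS_FMT_* numbering
 * convention: the high nibble of the 16-bit code identifies the format
 * family, with 0x2xxx being the YUV range, so everything else (RGB,
 * Bayer/raw, JPEG) is fed to the CSI as raw data.  A hedged
 * illustration:
 */
static bool demo_mbus_is_yuv(u32 mbus_code)
{
	/*
	 * e.g. MEDIA_BUS_FMT_UYVY8_2X8 (0x2006) is YUV, while
	 * MEDIA_BUS_FMT_SBGGR8_1X8 (0x3001) and
	 * MEDIA_BUS_FMT_JPEG_1X8 (0x4001) are not.
	 */
	return (mbus_code & 0xF000) == 0x2000;
}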
V4L2_PIX_FMT_YUV422P: + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: return 16; case V4L2_PIX_FMT_RGB24: case V4L2_PIX_FMT_BGR24: diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c index b04300c3811f..1fd16861f111 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c @@ -56,6 +56,9 @@ static const u32 supported_pixformats[] = { V4L2_PIX_FMT_NV16, V4L2_PIX_FMT_NV61, V4L2_PIX_FMT_YUV422P, + V4L2_PIX_FMT_RGB565, + V4L2_PIX_FMT_RGB565X, + V4L2_PIX_FMT_JPEG, }; static bool is_pixformat_valid(unsigned int pixformat) diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c index e2cf2b90e500..78d716c93649 100644 --- a/drivers/media/platform/ti-vpe/vpdma.c +++ b/drivers/media/platform/ti-vpe/vpdma.c @@ -404,7 +404,7 @@ EXPORT_SYMBOL(vpdma_map_desc_buf); /* * unmap descriptor/payload DMA buffer, disabling DMA access and - * allowing the main processor to acces the data + * allowing the main processor to access the data */ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf) { @@ -501,7 +501,7 @@ void vpdma_reset_desc_list(struct vpdma_desc_list *list) EXPORT_SYMBOL(vpdma_reset_desc_list); /* - * free the buffer allocated fot the VPDMA descriptor list, this should be + * free the buffer allocated for the VPDMA descriptor list, this should be * called when the user doesn't want to use VPDMA any more. */ void vpdma_free_desc_list(struct vpdma_desc_list *list) @@ -790,7 +790,7 @@ static void dump_dtd(struct vpdma_dtd *dtd) * append an outbound data transfer descriptor to the given descriptor list, * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel * - * @list: vpdma desc list to which we add this decriptor + * @list: vpdma desc list to which we add this descriptor * @width: width of the image in pixels in memory * @c_rect: compose params of output image * @fmt: vpdma data format of the buffer @@ -798,7 +798,7 @@ static void dump_dtd(struct vpdma_dtd *dtd) * max_width: enum for maximum width of data transfer * max_height: enum for maximum height of data transfer * chan: VPDMA channel - * flags: VPDMA flags to configure some descriptor fileds + * flags: VPDMA flags to configure some descriptor fields */ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width, int stride, const struct v4l2_rect *c_rect, @@ -863,14 +863,14 @@ EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd); * append an inbound data transfer descriptor to the given descriptor list, * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel * - * @list: vpdma desc list to which we add this decriptor + * @list: vpdma desc list to which we add this descriptor * @width: width of the image in pixels in memory(not the cropped width) * @c_rect: crop params of input image * @fmt: vpdma data format of the buffer * dma_addr: dma address as seen by VPDMA * chan: VPDMA channel * field: top or bottom field info of the input image - * flags: VPDMA flags to configure some descriptor fileds + * flags: VPDMA flags to configure some descriptor fields * frame_width/height: the complete width/height of the image presented to the * client (this makes sense when multiple channels are * connected to the same client, forming a larger frame) @@ -1008,7 +1008,7 @@ unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num) } EXPORT_SYMBOL(vpdma_get_list_mask); -/* clear previosuly occured list intterupts 
in the LIST_STAT register */ +/* clear previously occurred list interrupts in the LIST_STAT register */ void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num, int list_num) { diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index d70871d0ad2d..207e7e76c048 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -876,7 +876,7 @@ static int set_srcdst_params(struct vpe_ctx *ctx) /* * we make sure that the source image has a 16 byte aligned * stride, we need to do the same for the motion vector buffer - * by aligning it's stride to the next 16 byte boundry. this + * by aligning it's stride to the next 16 byte boundary. this * extra space will not be used by the de-interlacer, but will * ensure that vpdma operates correctly */ diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/platform/vicodec/codec-fwht.c index 5630f1dc45e6..d1d6085da9f1 100644 --- a/drivers/media/platform/vicodec/codec-fwht.c +++ b/drivers/media/platform/vicodec/codec-fwht.c @@ -10,8 +10,11 @@ */ #include <linux/string.h> +#include <linux/kernel.h> #include "codec-fwht.h" +#define OVERFLOW_BIT BIT(14) + /* * Note: bit 0 of the header must always be 0. Otherwise it cannot * be guaranteed that the magic 8 byte sequence (see below) can @@ -103,16 +106,21 @@ static int rlc(const s16 *in, __be16 *output, int blocktype) * This function will worst-case increase rlc_in by 65*2 bytes: * one s16 value for the header and 8 * 8 coefficients of type s16. */ -static s16 derlc(const __be16 **rlc_in, s16 *dwht_out) +static u16 derlc(const __be16 **rlc_in, s16 *dwht_out, + const __be16 *end_of_input) { /* header */ const __be16 *input = *rlc_in; - s16 ret = ntohs(*input++); + u16 stat; int dec_count = 0; s16 block[8 * 8 + 16]; s16 *wp = block; int i; + if (input > end_of_input) + return OVERFLOW_BIT; + stat = ntohs(*input++); + /* * Now de-compress, it expands one byte to up to 15 bytes * (or fills the remainder of the 64 bytes with zeroes if it @@ -122,9 +130,15 @@ static s16 derlc(const __be16 **rlc_in, s16 *dwht_out) * allow for overflow if the incoming data was malformed. 
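/*
 * The bounds checks added to derlc() above follow one pattern: validate
 * the read pointer against the last valid element of the compressed
 * buffer before every 16-bit fetch, and report an overrun through a
 * status bit that cannot collide with the real header bits.  A
 * stand-alone sketch of that pattern (names are illustrative):
 */
#define DEMO_OVERFLOW_BIT	BIT(14)

static u16 demo_read_be16(const __be16 **in, const __be16 *end, s16 *val)
{
	if (*in > end)
		return DEMO_OVERFLOW_BIT;	/* ran past the buffer */
	*val = ntohs(*(*in)++);
	return 0;
}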
*/ while (dec_count < 8 * 8) { - s16 in = ntohs(*input++); - int length = in & 0xf; - int coeff = in >> 4; + s16 in; + int length; + int coeff; + + if (input > end_of_input) + return OVERFLOW_BIT; + in = ntohs(*input++); + length = in & 0xf; + coeff = in >> 4; /* fill remainder with zeros */ if (length == 15) { @@ -149,7 +163,7 @@ static s16 derlc(const __be16 **rlc_in, s16 *dwht_out) dwht_out[x + y * 8] = *wp++; } *rlc_in = input; - return ret; + return stat; } static const int quant_table[] = { @@ -237,8 +251,6 @@ static void fwht(const u8 *block, s16 *output_block, unsigned int stride, unsigned int i; /* stage 1 */ - stride *= input_step; - for (i = 0; i < 8; i++, tmp += stride, out += 8) { switch (input_step) { case 1: @@ -562,7 +574,7 @@ static void fill_encoder_block(const u8 *input, s16 *dst, for (i = 0; i < 8; i++) { for (j = 0; j < 8; j++, input += input_step) *dst++ = *input; - input += (stride - 8) * input_step; + input += stride - 8 * input_step; } } @@ -660,7 +672,7 @@ static void add_deltas(s16 *deltas, const u8 *ref, int stride) static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max, struct fwht_cframe *cf, u32 height, u32 width, - unsigned int input_step, + u32 stride, unsigned int input_step, bool is_intra, bool next_is_intra) { u8 *input_start = input; @@ -671,7 +683,11 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max, unsigned int last_size = 0; unsigned int i, j; + width = round_up(width, 8); + height = round_up(height, 8); + for (j = 0; j < height / 8; j++) { + input = input_start + j * 8 * stride; for (i = 0; i < width / 8; i++) { /* intra code, first frame is always intra coded. */ int blocktype = IBLOCK; @@ -679,9 +695,9 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max, if (!is_intra) blocktype = decide_blocktype(input, refp, - deltablock, width, input_step); + deltablock, stride, input_step); if (blocktype == IBLOCK) { - fwht(input, cf->coeffs, width, input_step, 1); + fwht(input, cf->coeffs, stride, input_step, 1); quantize_intra(cf->coeffs, cf->de_coeffs, cf->i_frame_qp); } else { @@ -722,12 +738,12 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max, } last_size = size; } - input += width * 7 * input_step; } exit_loop: if (encoding & FWHT_FRAME_UNENCODED) { u8 *out = (u8 *)rlco_start; + u8 *p; input = input_start; /* @@ -736,8 +752,11 @@ exit_loop: * by 0xfe. Since YUV is limited range such values * shouldn't appear anyway. */ - for (i = 0; i < height * width; i++, input += input_step) - *out++ = (*input == 0xff) ? 0xfe : *input; + for (j = 0; j < height; j++) { + for (i = 0, p = input; i < width; i++, p += input_step) + *out++ = (*p == 0xff) ? 
0xfe : *p; + input += stride; + } *rlco = (__be16 *)out; encoding &= ~FWHT_FRAME_PCODED; } @@ -747,30 +766,32 @@ exit_loop: u32 fwht_encode_frame(struct fwht_raw_frame *frm, struct fwht_raw_frame *ref_frm, struct fwht_cframe *cf, - bool is_intra, bool next_is_intra) + bool is_intra, bool next_is_intra, + unsigned int width, unsigned int height, + unsigned int stride, unsigned int chroma_stride) { - unsigned int size = frm->height * frm->width; + unsigned int size = height * width; __be16 *rlco = cf->rlc_data; __be16 *rlco_max; u32 encoding; rlco_max = rlco + size / 2 - 256; encoding = encode_plane(frm->luma, ref_frm->luma, &rlco, rlco_max, cf, - frm->height, frm->width, + height, width, stride, frm->luma_alpha_step, is_intra, next_is_intra); if (encoding & FWHT_FRAME_UNENCODED) encoding |= FWHT_LUMA_UNENCODED; encoding &= ~FWHT_FRAME_UNENCODED; if (frm->components_num >= 3) { - u32 chroma_h = frm->height / frm->height_div; - u32 chroma_w = frm->width / frm->width_div; + u32 chroma_h = height / frm->height_div; + u32 chroma_w = width / frm->width_div; unsigned int chroma_size = chroma_h * chroma_w; rlco_max = rlco + chroma_size / 2 - 256; encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max, cf, chroma_h, chroma_w, - frm->chroma_step, + chroma_stride, frm->chroma_step, is_intra, next_is_intra); if (encoding & FWHT_FRAME_UNENCODED) encoding |= FWHT_CB_UNENCODED; @@ -778,7 +799,7 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm, rlco_max = rlco + chroma_size / 2 - 256; encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max, cf, chroma_h, chroma_w, - frm->chroma_step, + chroma_stride, frm->chroma_step, is_intra, next_is_intra); if (encoding & FWHT_FRAME_UNENCODED) encoding |= FWHT_CR_UNENCODED; @@ -787,10 +808,10 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm, if (frm->components_num == 4) { rlco_max = rlco + size / 2 - 256; - encoding = encode_plane(frm->alpha, ref_frm->alpha, &rlco, - rlco_max, cf, frm->height, frm->width, - frm->luma_alpha_step, - is_intra, next_is_intra); + encoding |= encode_plane(frm->alpha, ref_frm->alpha, &rlco, + rlco_max, cf, height, width, + stride, frm->luma_alpha_step, + is_intra, next_is_intra); if (encoding & FWHT_FRAME_UNENCODED) encoding |= FWHT_ALPHA_UNENCODED; encoding &= ~FWHT_FRAME_UNENCODED; @@ -800,18 +821,24 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm, return encoding; } -static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref, - u32 height, u32 width, bool uncompressed) +static bool decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref, + u32 height, u32 width, u32 coded_width, + bool uncompressed, const __be16 *end_of_rlco_buf) { unsigned int copies = 0; s16 copy[8 * 8]; - s16 stat; + u16 stat; unsigned int i, j; + width = round_up(width, 8); + height = round_up(height, 8); + if (uncompressed) { + if (end_of_rlco_buf + 1 < *rlco + width * height / 2) + return false; memcpy(ref, *rlco, width * height); *rlco += width * height / 2; - return; + return true; } /* @@ -822,19 +849,22 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref, */ for (j = 0; j < height / 8; j++) { for (i = 0; i < width / 8; i++) { - u8 *refp = ref + j * 8 * width + i * 8; + u8 *refp = ref + j * 8 * coded_width + i * 8; if (copies) { memcpy(cf->de_fwht, copy, sizeof(copy)); if (stat & PFRAME_BIT) - add_deltas(cf->de_fwht, refp, width); - fill_decoder_block(refp, cf->de_fwht, width); + add_deltas(cf->de_fwht, refp, + coded_width); + fill_decoder_block(refp, cf->de_fwht, + coded_width); 
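/*
 * decode_plane() above now distinguishes the visible size from the
 * coded (padded) plane width: blocks are still 8x8, but reference-plane
 * rows are coded_width bytes apart, so block (i, j) starts at
 * ref + j * 8 * coded_width + i * 8.  A small illustration of the
 * addressing (helper name is illustrative):
 */
static u8 *demo_block_start(u8 *plane, unsigned int coded_width,
			    unsigned int block_x, unsigned int block_y)
{
	return plane + block_y * 8 * coded_width + block_x * 8;
}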
copies--; continue; } - stat = derlc(rlco, cf->coeffs); - + stat = derlc(rlco, cf->coeffs, end_of_rlco_buf); + if (stat & OVERFLOW_BIT) + return false; if (stat & PFRAME_BIT) dequantize_inter(cf->coeffs); else @@ -847,35 +877,53 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref, if (copies) memcpy(copy, cf->de_fwht, sizeof(copy)); if (stat & PFRAME_BIT) - add_deltas(cf->de_fwht, refp, width); - fill_decoder_block(refp, cf->de_fwht, width); + add_deltas(cf->de_fwht, refp, coded_width); + fill_decoder_block(refp, cf->de_fwht, coded_width); } } + return true; } -void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref, - u32 hdr_flags, unsigned int components_num) +bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref, + u32 hdr_flags, unsigned int components_num, + unsigned int width, unsigned int height, + unsigned int coded_width) { const __be16 *rlco = cf->rlc_data; + const __be16 *end_of_rlco_buf = cf->rlc_data + + (cf->size / sizeof(*rlco)) - 1; - decode_plane(cf, &rlco, ref->luma, cf->height, cf->width, - hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED); + if (!decode_plane(cf, &rlco, ref->luma, height, width, coded_width, + hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED, + end_of_rlco_buf)) + return false; if (components_num >= 3) { - u32 h = cf->height; - u32 w = cf->width; + u32 h = height; + u32 w = width; + u32 c = coded_width; if (!(hdr_flags & FWHT_FL_CHROMA_FULL_HEIGHT)) h /= 2; - if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH)) + if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH)) { w /= 2; - decode_plane(cf, &rlco, ref->cb, h, w, - hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED); - decode_plane(cf, &rlco, ref->cr, h, w, - hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED); + c /= 2; + } + if (!decode_plane(cf, &rlco, ref->cb, h, w, c, + hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED, + end_of_rlco_buf)) + return false; + if (!decode_plane(cf, &rlco, ref->cr, h, w, c, + hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED, + end_of_rlco_buf)) + return false; } if (components_num == 4) - decode_plane(cf, &rlco, ref->alpha, cf->height, cf->width, - hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED); + if (!decode_plane(cf, &rlco, ref->alpha, height, width, + coded_width, + hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED, + end_of_rlco_buf)) + return false; + return true; } diff --git a/drivers/media/platform/vicodec/codec-fwht.h b/drivers/media/platform/vicodec/codec-fwht.h index 90ff8962fca7..c410512d47c5 100644 --- a/drivers/media/platform/vicodec/codec-fwht.h +++ b/drivers/media/platform/vicodec/codec-fwht.h @@ -56,7 +56,7 @@ #define FWHT_MAGIC1 0x4f4f4f4f #define FWHT_MAGIC2 0xffffffff -#define FWHT_VERSION 2 +#define FWHT_VERSION 3 /* Set if this is an interlaced format */ #define FWHT_FL_IS_INTERLACED BIT(0) @@ -76,11 +76,25 @@ #define FWHT_FL_CHROMA_FULL_HEIGHT BIT(7) #define FWHT_FL_CHROMA_FULL_WIDTH BIT(8) #define FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9) +#define FWHT_FL_I_FRAME BIT(10) /* A 4-values flag - the number of components - 1 */ -#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(17, 16) +#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16) #define FWHT_FL_COMPONENTS_NUM_OFFSET 16 +#define FWHT_FL_PIXENC_MSK GENMASK(20, 19) +#define FWHT_FL_PIXENC_OFFSET 19 +#define FWHT_FL_PIXENC_YUV (1 << FWHT_FL_PIXENC_OFFSET) +#define FWHT_FL_PIXENC_RGB (2 << FWHT_FL_PIXENC_OFFSET) +#define FWHT_FL_PIXENC_HSV (3 << FWHT_FL_PIXENC_OFFSET) + +/* + * A macro to calculate the needed padding in order to make sure + * both luma and chroma components resolutions are rounded up to + * a multiple of 8 + 
*/ +#define vic_round_dim(dim, div) (round_up((dim) / (div), 8) * (div)) + struct fwht_cframe_hdr { u32 magic1; u32 magic2; @@ -95,7 +109,6 @@ struct fwht_cframe_hdr { }; struct fwht_cframe { - unsigned int width, height; u16 i_frame_qp; u16 p_frame_qp; __be16 *rlc_data; @@ -106,7 +119,6 @@ struct fwht_cframe { }; struct fwht_raw_frame { - unsigned int width, height; unsigned int width_div; unsigned int height_div; unsigned int luma_alpha_step; @@ -125,8 +137,12 @@ struct fwht_raw_frame { u32 fwht_encode_frame(struct fwht_raw_frame *frm, struct fwht_raw_frame *ref_frm, struct fwht_cframe *cf, - bool is_intra, bool next_is_intra); -void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref, - u32 hdr_flags, unsigned int components_num); + bool is_intra, bool next_is_intra, + unsigned int width, unsigned int height, + unsigned int stride, unsigned int chroma_stride); +bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref, + u32 hdr_flags, unsigned int components_num, + unsigned int width, unsigned int height, + unsigned int coded_width); #endif diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c index 8cb0212df67f..6573a471c5ca 100644 --- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c +++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c @@ -11,32 +11,53 @@ #include "codec-v4l2-fwht.h" static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = { - { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3}, - { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3}, - { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3}, - { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3}, - { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3}, - { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3}, - { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3}, - { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3}, - { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3}, - { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3}, - { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3}, - { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3}, - { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3}, - { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3}, - { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3}, - { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3}, - { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4}, - { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4}, - { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1}, + { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3, 3, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3, 1, 
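/*
 * vic_round_dim() above pads a visible dimension so that the luma plane
 * and the subsampled chroma planes all end up as multiples of 8.  With
 * illustrative numbers, a 642-pixel-wide 4:2:0 frame (width_div == 2)
 * gives
 *
 *	coded_width  = vic_round_dim(642, 2)
 *		     = round_up(642 / 2, 8) * 2 = 328 * 2 = 656
 *	chroma width = 656 / 2 = 328		(still a multiple of 8)
 *
 * whereas rounding 642 itself up to 648 would leave a 324-pixel-wide
 * chroma plane that the 8x8 FWHT blocks could not tile.
 */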
FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV}, + { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV}, + { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB}, }; +const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div, + u32 height_div, + u32 components_num, + u32 pixenc, + unsigned int start_idx) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++) { + if (v4l2_fwht_pixfmts[i].width_div == width_div && + v4l2_fwht_pixfmts[i].height_div == height_div && + (!pixenc || v4l2_fwht_pixfmts[i].pixenc == pixenc) && + v4l2_fwht_pixfmts[i].components_num == components_num) { + if (start_idx == 0) + return v4l2_fwht_pixfmts + i; + start_idx--; + } + } + return NULL; +} + const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat) { unsigned int i; @@ -56,7 +77,8 @@ const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx) int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) { - unsigned int size = state->width * state->height; + unsigned int size = state->stride * state->coded_height; + unsigned int chroma_stride = state->stride; const struct v4l2_fwht_pixfmt_info *info = state->info; struct fwht_cframe_hdr *p_hdr; struct fwht_cframe cf; @@ -66,8 +88,7 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) if (!info) return -EINVAL; - rf.width = state->width; - rf.height = state->height; + rf.luma = p_in; rf.width_div = info->width_div; rf.height_div = info->height_div; @@ -84,14 +105,17 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) case V4L2_PIX_FMT_YUV420: rf.cb = rf.luma + size; rf.cr = rf.cb + size / 4; + chroma_stride /= 2; break; case V4L2_PIX_FMT_YVU420: rf.cr = rf.luma + size; rf.cb = rf.cr + size / 4; + chroma_stride /= 2; break; case V4L2_PIX_FMT_YUV422P: rf.cb = rf.luma + size; rf.cr = rf.cb + size / 2; + chroma_stride /= 2; break; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: @@ -163,15 +187,16 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) return -EINVAL; } - cf.width = state->width; - cf.height = state->height; cf.i_frame_qp = state->i_frame_qp; cf.p_frame_qp = state->p_frame_qp; cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr)); encoding = fwht_encode_frame(&rf, &state->ref_frame, &cf, !state->gop_cnt, - state->gop_cnt == state->gop_size - 1); + state->gop_cnt == state->gop_size - 1, + state->visible_width, + state->visible_height, + state->stride, chroma_stride); if (!(encoding & FWHT_FRAME_PCODED)) state->gop_cnt = 0; if (++state->gop_cnt >= state->gop_size) @@ -181,9 +206,10 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) p_hdr->magic1 = FWHT_MAGIC1; p_hdr->magic2 = FWHT_MAGIC2; p_hdr->version = htonl(FWHT_VERSION); - p_hdr->width = htonl(cf.width); - p_hdr->height = htonl(cf.height); + p_hdr->width = htonl(state->visible_width); + p_hdr->height = 
htonl(state->visible_height); flags |= (info->components_num - 1) << FWHT_FL_COMPONENTS_NUM_OFFSET; + flags |= info->pixenc; if (encoding & FWHT_LUMA_UNENCODED) flags |= FWHT_FL_LUMA_IS_UNCOMPRESSED; if (encoding & FWHT_CB_UNENCODED) @@ -192,6 +218,8 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) flags |= FWHT_FL_CR_IS_UNCOMPRESSED; if (encoding & FWHT_ALPHA_UNENCODED) flags |= FWHT_FL_ALPHA_IS_UNCOMPRESSED; + if (!(encoding & FWHT_FRAME_PCODED)) + flags |= FWHT_FL_I_FRAME; if (rf.height_div == 1) flags |= FWHT_FL_CHROMA_FULL_HEIGHT; if (rf.width_div == 1) @@ -202,65 +230,70 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) p_hdr->ycbcr_enc = htonl(state->ycbcr_enc); p_hdr->quantization = htonl(state->quantization); p_hdr->size = htonl(cf.size); - state->ref_frame.width = cf.width; - state->ref_frame.height = cf.height; return cf.size + sizeof(*p_hdr); } int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) { - unsigned int size = state->width * state->height; - unsigned int chroma_size = size; - unsigned int i; + unsigned int i, j, k; u32 flags; - struct fwht_cframe_hdr *p_hdr; struct fwht_cframe cf; - u8 *p; + u8 *p, *ref_p; unsigned int components_num = 3; unsigned int version; + const struct v4l2_fwht_pixfmt_info *info; + unsigned int hdr_width_div, hdr_height_div; if (!state->info) return -EINVAL; - p_hdr = (struct fwht_cframe_hdr *)p_in; - cf.width = ntohl(p_hdr->width); - cf.height = ntohl(p_hdr->height); + info = state->info; - version = ntohl(p_hdr->version); + version = ntohl(state->header.version); if (!version || version > FWHT_VERSION) { pr_err("version %d is not supported, current version is %d\n", version, FWHT_VERSION); return -EINVAL; } - if (p_hdr->magic1 != FWHT_MAGIC1 || - p_hdr->magic2 != FWHT_MAGIC2 || - (cf.width & 7) || (cf.height & 7)) + if (state->header.magic1 != FWHT_MAGIC1 || + state->header.magic2 != FWHT_MAGIC2) return -EINVAL; /* TODO: support resolution changes */ - if (cf.width != state->width || cf.height != state->height) + if (ntohl(state->header.width) != state->visible_width || + ntohl(state->header.height) != state->visible_height) return -EINVAL; - flags = ntohl(p_hdr->flags); + flags = ntohl(state->header.flags); - if (version == FWHT_VERSION) { + if (version >= 2) { + if ((flags & FWHT_FL_PIXENC_MSK) != info->pixenc) + return -EINVAL; components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >> - FWHT_FL_COMPONENTS_NUM_OFFSET); + FWHT_FL_COMPONENTS_NUM_OFFSET); } - state->colorspace = ntohl(p_hdr->colorspace); - state->xfer_func = ntohl(p_hdr->xfer_func); - state->ycbcr_enc = ntohl(p_hdr->ycbcr_enc); - state->quantization = ntohl(p_hdr->quantization); - cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr)); + if (components_num != info->components_num) + return -EINVAL; - if (!(flags & FWHT_FL_CHROMA_FULL_WIDTH)) - chroma_size /= 2; - if (!(flags & FWHT_FL_CHROMA_FULL_HEIGHT)) - chroma_size /= 2; + state->colorspace = ntohl(state->header.colorspace); + state->xfer_func = ntohl(state->header.xfer_func); + state->ycbcr_enc = ntohl(state->header.ycbcr_enc); + state->quantization = ntohl(state->header.quantization); + cf.rlc_data = (__be16 *)p_in; + cf.size = ntohl(state->header.size); - fwht_decode_frame(&cf, &state->ref_frame, flags, components_num); + hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; + hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 
1 : 2; + if (hdr_width_div != info->width_div || + hdr_height_div != info->height_div) + return -EINVAL; + + if (!fwht_decode_frame(&cf, &state->ref_frame, flags, components_num, + state->visible_width, state->visible_height, + state->coded_width)) + return -EINVAL; /* * TODO - handle the case where the compressed stream encodes a @@ -268,123 +301,226 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) */ switch (state->info->id) { case V4L2_PIX_FMT_GREY: - memcpy(p_out, state->ref_frame.luma, size); + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } break; case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YUV422P: - memcpy(p_out, state->ref_frame.luma, size); - p_out += size; - memcpy(p_out, state->ref_frame.cb, chroma_size); - p_out += chroma_size; - memcpy(p_out, state->ref_frame.cr, chroma_size); + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } + + ref_p = state->ref_frame.cb; + for (i = 0; i < state->coded_height / 2; i++) { + memcpy(p_out, ref_p, state->visible_width / 2); + p_out += state->stride / 2; + ref_p += state->coded_width / 2; + } + ref_p = state->ref_frame.cr; + for (i = 0; i < state->coded_height / 2; i++) { + memcpy(p_out, ref_p, state->visible_width / 2); + p_out += state->stride / 2; + ref_p += state->coded_width / 2; + } break; case V4L2_PIX_FMT_YVU420: - memcpy(p_out, state->ref_frame.luma, size); - p_out += size; - memcpy(p_out, state->ref_frame.cr, chroma_size); - p_out += chroma_size; - memcpy(p_out, state->ref_frame.cb, chroma_size); + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } + + ref_p = state->ref_frame.cr; + for (i = 0; i < state->coded_height / 2; i++) { + memcpy(p_out, ref_p, state->visible_width / 2); + p_out += state->stride / 2; + ref_p += state->coded_width / 2; + } + ref_p = state->ref_frame.cb; + for (i = 0; i < state->coded_height / 2; i++) { + memcpy(p_out, ref_p, state->visible_width / 2); + p_out += state->stride / 2; + ref_p += state->coded_width / 2; + } break; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV24: - memcpy(p_out, state->ref_frame.luma, size); - p_out += size; - for (i = 0, p = p_out; i < chroma_size; i++) { - *p++ = state->ref_frame.cb[i]; - *p++ = state->ref_frame.cr[i]; + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } + + k = 0; + for (i = 0; i < state->coded_height / 2; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.cb[k]; + *p++ = state->ref_frame.cr[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV61: case V4L2_PIX_FMT_NV42: - memcpy(p_out, state->ref_frame.luma, size); - p_out += size; - for (i = 0, p = p_out; i < chroma_size; i++) { - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.cb[i]; + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } + + k = 0; + for (i = 0; i < state->coded_height / 2; i++) { + for (j = 0, p = 
p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.cr[k]; + *p++ = state->ref_frame.cb[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_YUYV: - for (i = 0, p = p_out; i < size; i += 2) { - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i / 2]; - *p++ = state->ref_frame.luma[i + 1]; - *p++ = state->ref_frame.cr[i / 2]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k / 2]; + *p++ = state->ref_frame.luma[k + 1]; + *p++ = state->ref_frame.cr[k / 2]; + k += 2; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_YVYU: - for (i = 0, p = p_out; i < size; i += 2) { - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i / 2]; - *p++ = state->ref_frame.luma[i + 1]; - *p++ = state->ref_frame.cb[i / 2]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k / 2]; + *p++ = state->ref_frame.luma[k + 1]; + *p++ = state->ref_frame.cb[k / 2]; + k += 2; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_UYVY: - for (i = 0, p = p_out; i < size; i += 2) { - *p++ = state->ref_frame.cb[i / 2]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i / 2]; - *p++ = state->ref_frame.luma[i + 1]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.cb[k / 2]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k / 2]; + *p++ = state->ref_frame.luma[k + 1]; + k += 2; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_VYUY: - for (i = 0, p = p_out; i < size; i += 2) { - *p++ = state->ref_frame.cr[i / 2]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i / 2]; - *p++ = state->ref_frame.luma[i + 1]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.cr[k / 2]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k / 2]; + *p++ = state->ref_frame.luma[k + 1]; + k += 2; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_RGB24: case V4L2_PIX_FMT_HSV24: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.cr[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_BGR24: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.cb[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.cb[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_RGB32: case V4L2_PIX_FMT_XRGB32: case V4L2_PIX_FMT_HSV32: - for (i = 0, p = p_out; i < size; i++) { - *p++ = 0; - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = 0; + *p++ = state->ref_frame.cr[k]; + *p++ = 
state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_BGR32: case V4L2_PIX_FMT_XBGR32: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.cb[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i]; - *p++ = 0; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.cb[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k]; + *p++ = 0; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_ARGB32: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.alpha[i]; - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.alpha[k]; + *p++ = state->ref_frame.cr[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_ABGR32: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.cb[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.alpha[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.cb[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k]; + *p++ = state->ref_frame.alpha[k]; + k++; + } + p_out += state->stride; } break; default: diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.h b/drivers/media/platform/vicodec/codec-v4l2-fwht.h index ed53e28d4f9c..aa6fa90a48be 100644 --- a/drivers/media/platform/vicodec/codec-v4l2-fwht.h +++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.h @@ -19,12 +19,17 @@ struct v4l2_fwht_pixfmt_info { unsigned int width_div; unsigned int height_div; unsigned int components_num; + unsigned int planes_num; + unsigned int pixenc; }; struct v4l2_fwht_state { const struct v4l2_fwht_pixfmt_info *info; - unsigned int width; - unsigned int height; + unsigned int visible_width; + unsigned int visible_height; + unsigned int coded_width; + unsigned int coded_height; + unsigned int stride; unsigned int gop_size; unsigned int gop_cnt; u16 i_frame_qp; @@ -36,11 +41,17 @@ struct v4l2_fwht_state { enum v4l2_quantization quantization; struct fwht_raw_frame ref_frame; + struct fwht_cframe_hdr header; u8 *compressed_frame; }; const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat); const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx); +const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div, + u32 height_div, + u32 components_num, + u32 pixenc, + unsigned int start_idx); int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out); int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out); diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 0d7876f5acf0..d7636fe9e174 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c @@ -61,7 +61,7 @@ struct pixfmt_info { }; static const struct v4l2_fwht_pixfmt_info pixfmt_fwht = { - V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0 + V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0, 1 }; static void vicodec_dev_release(struct device *dev) @@ -75,8 +75,10 @@ static struct platform_device vicodec_pdev = { /* Per-queue, 
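/*
 * All of the per-format copy loops above reduce to one pattern: the
 * decoder's reference planes are coded_width x coded_height, and only
 * the visible part of each row is copied out, with the destination
 * advancing by the queue's stride (bytesperline).  A generic sketch of
 * a single plane copy (illustrative helper, <linux/string.h> assumed
 * for memcpy()):
 */
static void demo_copy_plane(u8 *dst, unsigned int dst_stride,
			    const u8 *src, unsigned int src_stride,
			    unsigned int visible_width, unsigned int lines)
{
	unsigned int i;

	for (i = 0; i < lines; i++) {
		memcpy(dst, src, visible_width);
		dst += dst_stride;
		src += src_stride;
	}
}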
driver-specific private data */ struct vicodec_q_data { - unsigned int width; - unsigned int height; + unsigned int coded_width; + unsigned int coded_height; + unsigned int visible_width; + unsigned int visible_height; unsigned int sizeimage; unsigned int sequence; const struct v4l2_fwht_pixfmt_info *info; @@ -122,10 +124,12 @@ struct vicodec_ctx { u32 cur_buf_offset; u32 comp_max_size; u32 comp_size; + u32 header_size; u32 comp_magic_cnt; - u32 comp_frame_size; bool comp_has_frame; bool comp_has_next_frame; + bool first_source_change_sent; + bool source_changed; }; static inline struct vicodec_ctx *file2ctx(struct file *file) @@ -182,6 +186,10 @@ static int device_process(struct vicodec_ctx *ctx, return ret; vb2_set_plane_payload(&dst_vb->vb2_buf, 0, ret); } else { + unsigned int comp_frame_size = ntohl(ctx->state.header.size); + + if (comp_frame_size > ctx->comp_max_size) + return -EINVAL; state->info = q_dst->info; ret = v4l2_fwht_decode(state, p_src, p_dst); if (ret < 0) @@ -190,18 +198,8 @@ static int device_process(struct vicodec_ctx *ctx, } dst_vb->sequence = q_dst->sequence++; - dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; - - if (src_vb->flags & V4L2_BUF_FLAG_TIMECODE) - dst_vb->timecode = src_vb->timecode; - dst_vb->field = src_vb->field; dst_vb->flags &= ~V4L2_BUF_FLAG_LAST; - dst_vb->flags |= src_vb->flags & - (V4L2_BUF_FLAG_TIMECODE | - V4L2_BUF_FLAG_KEYFRAME | - V4L2_BUF_FLAG_PFRAME | - V4L2_BUF_FLAG_BFRAME | - V4L2_BUF_FLAG_TSTAMP_SRC_MASK); + v4l2_m2m_buf_copy_metadata(src_vb, dst_vb, !ctx->is_enc); return 0; } @@ -209,6 +207,63 @@ static int device_process(struct vicodec_ctx *ctx, /* * mem2mem callbacks */ +static enum vb2_buffer_state get_next_header(struct vicodec_ctx *ctx, + u8 **pp, u32 sz) +{ + static const u8 magic[] = { + 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff + }; + u8 *p = *pp; + u32 state; + u8 *header = (u8 *)&ctx->state.header; + + state = VB2_BUF_STATE_DONE; + + if (!ctx->header_size) { + state = VB2_BUF_STATE_ERROR; + for (; p < *pp + sz; p++) { + u32 copy; + + p = memchr(p, magic[ctx->comp_magic_cnt], + *pp + sz - p); + if (!p) { + ctx->comp_magic_cnt = 0; + p = *pp + sz; + break; + } + copy = sizeof(magic) - ctx->comp_magic_cnt; + if (*pp + sz - p < copy) + copy = *pp + sz - p; + + memcpy(header + ctx->comp_magic_cnt, p, copy); + ctx->comp_magic_cnt += copy; + if (!memcmp(header, magic, ctx->comp_magic_cnt)) { + p += copy; + state = VB2_BUF_STATE_DONE; + break; + } + ctx->comp_magic_cnt = 0; + } + if (ctx->comp_magic_cnt < sizeof(magic)) { + *pp = p; + return state; + } + ctx->header_size = sizeof(magic); + } + + if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { + u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->header_size; + + if (*pp + sz - p < copy) + copy = *pp + sz - p; + + memcpy(header + ctx->header_size, p, copy); + p += copy; + ctx->header_size += copy; + } + *pp = p; + return state; +} /* device_run() - prepares and starts the device */ static void device_run(void *priv) @@ -249,6 +304,7 @@ static void device_run(void *priv) } v4l2_m2m_buf_done(dst_buf, state); ctx->comp_size = 0; + ctx->header_size = 0; ctx->comp_magic_cnt = 0; ctx->comp_has_frame = false; spin_unlock(ctx->lock); @@ -273,6 +329,96 @@ static void job_remove_src_buf(struct vicodec_ctx *ctx, u32 state) spin_unlock(ctx->lock); } +static const struct v4l2_fwht_pixfmt_info * +info_from_header(const struct fwht_cframe_hdr *p_hdr) +{ + unsigned int flags = ntohl(p_hdr->flags); + unsigned int width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 
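/*
 * get_next_header() above has to find the 8-byte FWHT magic even when
 * it straddles two source buffers, so it keeps the number of magic
 * bytes matched so far (comp_magic_cnt) and resumes the search in the
 * next buffer.  A simplified sketch of such a resumable match, with the
 * match state owned by the caller (names illustrative; restarting at
 * the mismatching byte is enough here because no proper prefix of this
 * magic is also a suffix):
 */
static bool demo_match_magic(const u8 *magic, size_t magic_len,
			     unsigned int *matched,
			     const u8 **p, const u8 *end)
{
	while (*p < end && *matched < magic_len) {
		if (**p == magic[*matched]) {
			(*matched)++;
			(*p)++;
		} else if (*matched) {
			*matched = 0;	/* restart the match at this byte */
		} else {
			(*p)++;
		}
	}
	return *matched == magic_len;
}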
1 : 2; + unsigned int height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2; + unsigned int components_num = 3; + unsigned int pixenc = 0; + unsigned int version = ntohl(p_hdr->version); + + if (version >= 2) { + components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >> + FWHT_FL_COMPONENTS_NUM_OFFSET); + pixenc = (flags & FWHT_FL_PIXENC_MSK); + } + return v4l2_fwht_default_fmt(width_div, height_div, + components_num, pixenc, 0); +} + +static bool is_header_valid(const struct fwht_cframe_hdr *p_hdr) +{ + const struct v4l2_fwht_pixfmt_info *info; + unsigned int w = ntohl(p_hdr->width); + unsigned int h = ntohl(p_hdr->height); + unsigned int version = ntohl(p_hdr->version); + unsigned int flags = ntohl(p_hdr->flags); + + if (!version || version > FWHT_VERSION) + return false; + + if (w < MIN_WIDTH || w > MAX_WIDTH || h < MIN_HEIGHT || h > MAX_HEIGHT) + return false; + + if (version >= 2) { + unsigned int components_num = 1 + + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >> + FWHT_FL_COMPONENTS_NUM_OFFSET); + unsigned int pixenc = flags & FWHT_FL_PIXENC_MSK; + + if (components_num == 0 || components_num > 4 || !pixenc) + return false; + } + + info = info_from_header(p_hdr); + if (!info) + return false; + return true; +} + +static void update_capture_data_from_header(struct vicodec_ctx *ctx) +{ + struct vicodec_q_data *q_dst = get_q_data(ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + const struct fwht_cframe_hdr *p_hdr = &ctx->state.header; + const struct v4l2_fwht_pixfmt_info *info = info_from_header(p_hdr); + unsigned int flags = ntohl(p_hdr->flags); + unsigned int hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; + unsigned int hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2; + + q_dst->info = info; + q_dst->visible_width = ntohl(p_hdr->width); + q_dst->visible_height = ntohl(p_hdr->height); + q_dst->coded_width = vic_round_dim(q_dst->visible_width, hdr_width_div); + q_dst->coded_height = vic_round_dim(q_dst->visible_height, + hdr_height_div); + + q_dst->sizeimage = q_dst->coded_width * q_dst->coded_height * + q_dst->info->sizeimage_mult / q_dst->info->sizeimage_div; + ctx->state.colorspace = ntohl(p_hdr->colorspace); + + ctx->state.xfer_func = ntohl(p_hdr->xfer_func); + ctx->state.ycbcr_enc = ntohl(p_hdr->ycbcr_enc); + ctx->state.quantization = ntohl(p_hdr->quantization); +} + +static void set_last_buffer(struct vb2_v4l2_buffer *dst_buf, + const struct vb2_v4l2_buffer *src_buf, + struct vicodec_ctx *ctx) +{ + struct vicodec_q_data *q_dst = get_q_data(ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); + dst_buf->sequence = q_dst->sequence++; + + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, !ctx->is_enc); + dst_buf->flags |= V4L2_BUF_FLAG_LAST; + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); +} + static int job_ready(void *priv) { static const u8 magic[] = { @@ -284,7 +430,16 @@ static int job_ready(void *priv) u8 *p; u32 sz; u32 state; - + struct vicodec_q_data *q_dst = get_q_data(ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + unsigned int flags; + unsigned int hdr_width_div; + unsigned int hdr_height_div; + unsigned int max_to_copy; + unsigned int comp_frame_size; + + if (ctx->source_changed) + return 0; if (ctx->is_enc || ctx->comp_has_frame) return 1; @@ -299,59 +454,27 @@ restart: state = VB2_BUF_STATE_DONE; - if (!ctx->comp_size) { - state = VB2_BUF_STATE_ERROR; - for (; p < p_src + sz; p++) { - u32 copy; - - p = memchr(p, magic[ctx->comp_magic_cnt], - p_src + sz - p); - if (!p) { - ctx->comp_magic_cnt = 0; - break; - } - copy = 
sizeof(magic) - ctx->comp_magic_cnt; - if (p_src + sz - p < copy) - copy = p_src + sz - p; - - memcpy(ctx->state.compressed_frame + ctx->comp_magic_cnt, - p, copy); - ctx->comp_magic_cnt += copy; - if (!memcmp(ctx->state.compressed_frame, magic, - ctx->comp_magic_cnt)) { - p += copy; - state = VB2_BUF_STATE_DONE; - break; - } - ctx->comp_magic_cnt = 0; - } - if (ctx->comp_magic_cnt < sizeof(magic)) { + if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { + state = get_next_header(ctx, &p, p_src + sz - p); + if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { job_remove_src_buf(ctx, state); goto restart; } - ctx->comp_size = sizeof(magic); } - if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) { - struct fwht_cframe_hdr *p_hdr = - (struct fwht_cframe_hdr *)ctx->state.compressed_frame; - u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->comp_size; - if (copy > p_src + sz - p) - copy = p_src + sz - p; - memcpy(ctx->state.compressed_frame + ctx->comp_size, - p, copy); - p += copy; - ctx->comp_size += copy; - if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) { - job_remove_src_buf(ctx, state); - goto restart; - } - ctx->comp_frame_size = ntohl(p_hdr->size) + sizeof(*p_hdr); - if (ctx->comp_frame_size > ctx->comp_max_size) - ctx->comp_frame_size = ctx->comp_max_size; - } - if (ctx->comp_size < ctx->comp_frame_size) { - u32 copy = ctx->comp_frame_size - ctx->comp_size; + comp_frame_size = ntohl(ctx->state.header.size); + + /* + * The current scanned frame might be the first frame of a new + * resolution so its size might be larger than ctx->comp_max_size. + * In that case it is copied up to the current buffer capacity and + * the copy will continue after allocating new large enough buffer + * when restreaming + */ + max_to_copy = min(comp_frame_size, ctx->comp_max_size); + + if (ctx->comp_size < max_to_copy) { + u32 copy = max_to_copy - ctx->comp_size; if (copy > p_src + sz - p) copy = p_src + sz - p; @@ -360,15 +483,17 @@ restart: p, copy); p += copy; ctx->comp_size += copy; - if (ctx->comp_size < ctx->comp_frame_size) { + if (ctx->comp_size < max_to_copy) { job_remove_src_buf(ctx, state); goto restart; } } ctx->cur_buf_offset = p - p_src; - ctx->comp_has_frame = true; + if (ctx->comp_size == comp_frame_size) + ctx->comp_has_frame = true; ctx->comp_has_next_frame = false; - if (sz - ctx->cur_buf_offset >= sizeof(struct fwht_cframe_hdr)) { + if (ctx->comp_has_frame && sz - ctx->cur_buf_offset >= + sizeof(struct fwht_cframe_hdr)) { struct fwht_cframe_hdr *p_hdr = (struct fwht_cframe_hdr *)p; u32 frame_size = ntohl(p_hdr->size); u32 remaining = sz - ctx->cur_buf_offset - sizeof(*p_hdr); @@ -376,6 +501,36 @@ restart: if (!memcmp(p, magic, sizeof(magic))) ctx->comp_has_next_frame = remaining >= frame_size; } + /* + * if the header is invalid the device_run will just drop the frame + * with an error + */ + if (!is_header_valid(&ctx->state.header) && ctx->comp_has_frame) + return 1; + flags = ntohl(ctx->state.header.flags); + hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; + hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 
1 : 2; + + if (ntohl(ctx->state.header.width) != q_dst->visible_width || + ntohl(ctx->state.header.height) != q_dst->visible_height || + !q_dst->info || + hdr_width_div != q_dst->info->width_div || + hdr_height_div != q_dst->info->height_div) { + static const struct v4l2_event rs_event = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, + }; + + struct vb2_v4l2_buffer *dst_buf = + v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + + update_capture_data_from_header(ctx); + ctx->first_source_change_sent = true; + v4l2_event_queue_fh(&ctx->fh, &rs_event); + set_last_buffer(dst_buf, src_buf, ctx); + ctx->source_changed = true; + return 0; + } return 1; } @@ -403,9 +558,10 @@ static int vidioc_querycap(struct file *file, void *priv, return 0; } -static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out) +static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx, + bool is_out) { - bool is_uncomp = (is_enc && is_out) || (!is_enc && !is_out); + bool is_uncomp = (ctx->is_enc && is_out) || (!ctx->is_enc && !is_out); if (V4L2_TYPE_IS_MULTIPLANAR(f->type) && !multiplanar) return -EINVAL; @@ -414,8 +570,16 @@ static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out) if (is_uncomp) { const struct v4l2_fwht_pixfmt_info *info = - v4l2_fwht_get_pixfmt(f->index); + get_q_data(ctx, f->type)->info; + if (!info || ctx->is_enc) + info = v4l2_fwht_get_pixfmt(f->index); + else + info = v4l2_fwht_default_fmt(info->width_div, + info->height_div, + info->components_num, + info->pixenc, + f->index); if (!info) return -EINVAL; f->pixelformat = info->id; @@ -432,7 +596,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, { struct vicodec_ctx *ctx = file2ctx(file); - return enum_fmt(f, ctx->is_enc, false); + return enum_fmt(f, ctx, false); } static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, @@ -440,7 +604,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, { struct vicodec_ctx *ctx = file2ctx(file); - return enum_fmt(f, ctx->is_enc, true); + return enum_fmt(f, ctx, true); } static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) @@ -458,17 +622,21 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) q_data = get_q_data(ctx, f->type); info = q_data->info; + if (!info) + info = v4l2_fwht_get_pixfmt(0); + switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (multiplanar) return -EINVAL; pix = &f->fmt.pix; - pix->width = q_data->width; - pix->height = q_data->height; + pix->width = q_data->coded_width; + pix->height = q_data->coded_height; pix->field = V4L2_FIELD_NONE; pix->pixelformat = info->id; - pix->bytesperline = q_data->width * info->bytesperline_mult; + pix->bytesperline = q_data->coded_width * + info->bytesperline_mult; pix->sizeimage = q_data->sizeimage; pix->colorspace = ctx->state.colorspace; pix->xfer_func = ctx->state.xfer_func; @@ -481,13 +649,13 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) if (!multiplanar) return -EINVAL; pix_mp = &f->fmt.pix_mp; - pix_mp->width = q_data->width; - pix_mp->height = q_data->height; + pix_mp->width = q_data->coded_width; + pix_mp->height = q_data->coded_height; pix_mp->field = V4L2_FIELD_NONE; pix_mp->pixelformat = info->id; pix_mp->num_planes = 1; pix_mp->plane_fmt[0].bytesperline = - q_data->width * info->bytesperline_mult; + q_data->coded_width * info->bytesperline_mult; pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage; pix_mp->colorspace = 
ctx->state.colorspace; pix_mp->xfer_func = ctx->state.xfer_func; @@ -528,8 +696,13 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) pix = &f->fmt.pix; if (pix->pixelformat != V4L2_PIX_FMT_FWHT) info = find_fmt(pix->pixelformat); - pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH) & ~7; - pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT) & ~7; + + pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH); + pix->width = vic_round_dim(pix->width, info->width_div); + + pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT); + pix->height = vic_round_dim(pix->height, info->height_div); + pix->field = V4L2_FIELD_NONE; pix->bytesperline = pix->width * info->bytesperline_mult; @@ -545,9 +718,14 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) if (pix_mp->pixelformat != V4L2_PIX_FMT_FWHT) info = find_fmt(pix_mp->pixelformat); pix_mp->num_planes = 1; - pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH) & ~7; - pix_mp->height = - clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT) & ~7; + + pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH); + pix_mp->width = vic_round_dim(pix_mp->width, info->width_div); + + pix_mp->height = clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT); + pix_mp->height = vic_round_dim(pix_mp->height, + info->height_div); + pix_mp->field = V4L2_FIELD_NONE; plane->bytesperline = pix_mp->width * info->bytesperline_mult; @@ -657,9 +835,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) pix = &f->fmt.pix; if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type)) fmt_changed = + !q_data->info || q_data->info->id != pix->pixelformat || - q_data->width != pix->width || - q_data->height != pix->height; + q_data->coded_width != pix->width || + q_data->coded_height != pix->height; if (vb2_is_busy(vq) && fmt_changed) return -EBUSY; @@ -668,8 +847,8 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) q_data->info = &pixfmt_fwht; else q_data->info = find_fmt(pix->pixelformat); - q_data->width = pix->width; - q_data->height = pix->height; + q_data->coded_width = pix->width; + q_data->coded_height = pix->height; q_data->sizeimage = pix->sizeimage; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: @@ -677,9 +856,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) pix_mp = &f->fmt.pix_mp; if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type)) fmt_changed = + !q_data->info || q_data->info->id != pix_mp->pixelformat || - q_data->width != pix_mp->width || - q_data->height != pix_mp->height; + q_data->coded_width != pix_mp->width || + q_data->coded_height != pix_mp->height; if (vb2_is_busy(vq) && fmt_changed) return -EBUSY; @@ -688,17 +868,24 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) q_data->info = &pixfmt_fwht; else q_data->info = find_fmt(pix_mp->pixelformat); - q_data->width = pix_mp->width; - q_data->height = pix_mp->height; + q_data->coded_width = pix_mp->width; + q_data->coded_height = pix_mp->height; q_data->sizeimage = pix_mp->plane_fmt[0].sizeimage; break; default: return -EINVAL; } + if (q_data->visible_width > q_data->coded_width) + q_data->visible_width = q_data->coded_width; + if (q_data->visible_height > q_data->coded_height) + q_data->visible_height = q_data->coded_height; + dprintk(ctx->dev, - "Setting format for type %d, wxh: %dx%d, fourcc: %08x\n", - f->type, q_data->width, q_data->height, q_data->info->id); + "Setting format for type %d, coded wxh: %dx%d, visible wxh: %dx%d, fourcc: %08x\n", + f->type, 
q_data->coded_width, q_data->coded_height, + q_data->visible_width, q_data->visible_height, + q_data->info->id); return 0; } @@ -753,6 +940,84 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv, return ret; } +static int vidioc_g_selection(struct file *file, void *priv, + struct v4l2_selection *s) +{ + struct vicodec_ctx *ctx = file2ctx(file); + struct vicodec_q_data *q_data; + enum v4l2_buf_type valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + enum v4l2_buf_type valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + if (multiplanar) { + valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + } + + if (s->type != valid_cap_type && s->type != valid_out_type) + return -EINVAL; + + q_data = get_q_data(ctx, s->type); + if (!q_data) + return -EINVAL; + /* + * encoder supports only cropping on the OUTPUT buffer + * decoder supports only composing on the CAPTURE buffer + */ + if ((ctx->is_enc && s->type == valid_out_type) || + (!ctx->is_enc && s->type == valid_cap_type)) { + switch (s->target) { + case V4L2_SEL_TGT_COMPOSE: + case V4L2_SEL_TGT_CROP: + s->r.left = 0; + s->r.top = 0; + s->r.width = q_data->visible_width; + s->r.height = q_data->visible_height; + return 0; + case V4L2_SEL_TGT_COMPOSE_DEFAULT: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + s->r.left = 0; + s->r.top = 0; + s->r.width = q_data->coded_width; + s->r.height = q_data->coded_height; + return 0; + } + } + return -EINVAL; +} + +static int vidioc_s_selection(struct file *file, void *priv, + struct v4l2_selection *s) +{ + struct vicodec_ctx *ctx = file2ctx(file); + struct vicodec_q_data *q_data; + enum v4l2_buf_type out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + if (multiplanar) + out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + + if (s->type != out_type) + return -EINVAL; + + q_data = get_q_data(ctx, s->type); + if (!q_data) + return -EINVAL; + + if (!ctx->is_enc || s->target != V4L2_SEL_TGT_CROP) + return -EINVAL; + + s->r.left = 0; + s->r.top = 0; + q_data->visible_width = clamp(s->r.width, MIN_WIDTH, + q_data->coded_width); + s->r.width = q_data->visible_width; + q_data->visible_height = clamp(s->r.height, MIN_HEIGHT, + q_data->coded_height); + s->r.height = q_data->visible_height; + return 0; +} + static void vicodec_mark_last_buf(struct vicodec_ctx *ctx) { static const struct v4l2_event eos_event = { @@ -853,7 +1118,13 @@ static int vicodec_enum_framesizes(struct file *file, void *fh, static int vicodec_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { + struct vicodec_ctx *ctx = container_of(fh, struct vicodec_ctx, fh); + switch (sub->type) { + case V4L2_EVENT_SOURCE_CHANGE: + if (ctx->is_enc) + return -EINVAL; + /* fall through */ case V4L2_EVENT_EOS: return v4l2_event_subscribe(fh, sub, 0, NULL); default: @@ -895,6 +1166,9 @@ static const struct v4l2_ioctl_ops vicodec_ioctl_ops = { .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + .vidioc_g_selection = vidioc_g_selection, + .vidioc_s_selection = vidioc_s_selection, + .vidioc_try_encoder_cmd = vicodec_try_encoder_cmd, .vidioc_encoder_cmd = vicodec_encoder_cmd, .vidioc_try_decoder_cmd = vicodec_try_decoder_cmd, @@ -960,7 +1234,71 @@ static void vicodec_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + unsigned int sz = vb2_get_plane_payload(&vbuf->vb2_buf, 0); + u8 *p_src = 
vb2_plane_vaddr(&vbuf->vb2_buf, 0); + u8 *p = p_src; + struct vb2_queue *vq_out = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, + V4L2_BUF_TYPE_VIDEO_OUTPUT); + struct vb2_queue *vq_cap = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + bool header_valid = false; + static const struct v4l2_event rs_event = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, + }; + /* buf_queue handles only the first source change event */ + if (ctx->first_source_change_sent) { + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); + return; + } + + /* + * if both queues are streaming, the source change event is + * handled in job_ready + */ + if (vb2_is_streaming(vq_cap) && vb2_is_streaming(vq_out)) { + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); + return; + } + + /* + * source change event is relevant only for the decoder + * in the compressed stream + */ + if (ctx->is_enc || !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); + return; + } + + do { + enum vb2_buffer_state state = + get_next_header(ctx, &p, p_src + sz - p); + + if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { + v4l2_m2m_buf_done(vbuf, state); + return; + } + header_valid = is_header_valid(&ctx->state.header); + /* + * p points right after the end of the header in the + * buffer. If the header is invalid we set p to point + * to the next byte after the start of the header + */ + if (!header_valid) { + p = p - sizeof(struct fwht_cframe_hdr) + 1; + if (p < p_src) + p = p_src; + ctx->header_size = 0; + ctx->comp_magic_cnt = 0; + } + + } while (!header_valid); + + ctx->cur_buf_offset = p - p_src; + update_capture_data_from_header(ctx); + ctx->first_source_change_sent = true; + v4l2_event_queue_fh(&ctx->fh, &rs_event); v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } @@ -988,47 +1326,70 @@ static int vicodec_start_streaming(struct vb2_queue *q, struct vicodec_ctx *ctx = vb2_get_drv_priv(q); struct vicodec_q_data *q_data = get_q_data(ctx, q->type); struct v4l2_fwht_state *state = &ctx->state; - unsigned int size = q_data->width * q_data->height; const struct v4l2_fwht_pixfmt_info *info = q_data->info; - unsigned int chroma_div = info->width_div * info->height_div; + unsigned int size = q_data->coded_width * q_data->coded_height; + unsigned int chroma_div; unsigned int total_planes_size; + u8 *new_comp_frame; - /* - * we don't know ahead how many components are in the encoding type - * V4L2_PIX_FMT_FWHT, so we will allocate space for 4 planes. 
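
As an aside that is not part of the patch itself: the decoder path above queues a V4L2_EVENT_SOURCE_CHANGE event as soon as the first valid FWHT header is found. Below is a minimal userspace sketch of how a client might consume that event and re-read the CAPTURE format; the open m2m decoder fd, the poll() loop and the buffer reallocation are assumed and only hinted at in comments.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int watch_source_change(int fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct v4l2_format fmt;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_SOURCE_CHANGE;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub))
		return -1;

	/* ...queue OUTPUT buffers, then poll() with POLLPRI set... */

	if (ioctl(fd, VIDIOC_DQEVENT, &ev))
		return -1;
	if (ev.type == V4L2_EVENT_SOURCE_CHANGE &&
	    (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION)) {
		memset(&fmt, 0, sizeof(fmt));
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		if (ioctl(fd, VIDIOC_G_FMT, &fmt))
			return -1;
		/* reallocate CAPTURE buffers for fmt.fmt.pix here */
	}
	return 0;
}
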
- */ - if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4) + if (!info) + return -EINVAL; + + chroma_div = info->width_div * info->height_div; + q_data->sequence = 0; + + ctx->last_src_buf = NULL; + ctx->last_dst_buf = NULL; + state->gop_cnt = 0; + + if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) || + (!V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) + return 0; + + if (info->id == V4L2_PIX_FMT_FWHT) { + vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED); + return -EINVAL; + } + if (info->components_num == 4) total_planes_size = 2 * size + 2 * (size / chroma_div); else if (info->components_num == 3) total_planes_size = size + 2 * (size / chroma_div); else total_planes_size = size; - q_data->sequence = 0; + state->visible_width = q_data->visible_width; + state->visible_height = q_data->visible_height; + state->coded_width = q_data->coded_width; + state->coded_height = q_data->coded_height; + state->stride = q_data->coded_width * + info->bytesperline_mult; - if (!V4L2_TYPE_IS_OUTPUT(q->type)) { - if (!ctx->is_enc) { - state->width = q_data->width; - state->height = q_data->height; - } - return 0; - } - - if (ctx->is_enc) { - state->width = q_data->width; - state->height = q_data->height; - } - state->ref_frame.width = state->ref_frame.height = 0; state->ref_frame.luma = kvmalloc(total_planes_size, GFP_KERNEL); - ctx->comp_max_size = total_planes_size + sizeof(struct fwht_cframe_hdr); - state->compressed_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL); - if (!state->ref_frame.luma || !state->compressed_frame) { + ctx->comp_max_size = total_planes_size; + new_comp_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL); + + if (!state->ref_frame.luma || !new_comp_frame) { kvfree(state->ref_frame.luma); - kvfree(state->compressed_frame); + kvfree(new_comp_frame); vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED); return -ENOMEM; } - if (info->id == V4L2_PIX_FMT_FWHT || info->components_num >= 3) { + /* + * if state->compressed_frame was already allocated then + * it contain data of the first frame of the new resolution + */ + if (state->compressed_frame) { + if (ctx->comp_size > ctx->comp_max_size) + ctx->comp_size = ctx->comp_max_size; + + memcpy(new_comp_frame, + state->compressed_frame, ctx->comp_size); + } + + kvfree(state->compressed_frame); + state->compressed_frame = new_comp_frame; + + if (info->components_num >= 3) { state->ref_frame.cb = state->ref_frame.luma + size; state->ref_frame.cr = state->ref_frame.cb + size / chroma_div; } else { @@ -1036,20 +1397,11 @@ static int vicodec_start_streaming(struct vb2_queue *q, state->ref_frame.cr = NULL; } - if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4) + if (info->components_num == 4) state->ref_frame.alpha = state->ref_frame.cr + size / chroma_div; else state->ref_frame.alpha = NULL; - - ctx->last_src_buf = NULL; - ctx->last_dst_buf = NULL; - state->gop_cnt = 0; - ctx->cur_buf_offset = 0; - ctx->comp_size = 0; - ctx->comp_magic_cnt = 0; - ctx->comp_has_frame = false; - return 0; } @@ -1059,11 +1411,20 @@ static void vicodec_stop_streaming(struct vb2_queue *q) vicodec_return_bufs(q, VB2_BUF_STATE_ERROR); - if (!V4L2_TYPE_IS_OUTPUT(q->type)) - return; - - kvfree(ctx->state.ref_frame.luma); - kvfree(ctx->state.compressed_frame); + if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) || + (V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) { + kvfree(ctx->state.ref_frame.luma); + ctx->comp_max_size = 0; + ctx->source_changed = false; + } + if (V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) { + ctx->cur_buf_offset = 0; + ctx->comp_size = 0; 
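
The start_streaming() rework above regrows the compressed-frame buffer for a new resolution while keeping whatever part of the pending frame was already gathered. A simplified userspace rendering of that pattern follows, with kvmalloc/kvfree swapped for malloc/free and the struct and function names invented for illustration.

#include <stdlib.h>
#include <string.h>

struct comp_buf {
	unsigned char *data;
	size_t filled;	/* bytes of the pending frame copied so far */
	size_t cap;	/* current capacity */
};

int comp_buf_regrow(struct comp_buf *b, size_t new_cap)
{
	unsigned char *n = malloc(new_cap);

	if (!n)
		return -1;
	/* carry over the already-gathered prefix, clamped to the new size */
	if (b->filled > new_cap)
		b->filled = new_cap;
	if (b->data)
		memcpy(n, b->data, b->filled);
	free(b->data);
	b->data = n;
	b->cap = new_cap;
	return 0;
}
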
+ ctx->header_size = 0; + ctx->comp_magic_cnt = 0; + ctx->comp_has_frame = 0; + ctx->comp_has_next_frame = 0; + } } static const struct vb2_ops vicodec_qops = { @@ -1204,8 +1565,10 @@ static int vicodec_open(struct file *file) ctx->q_data[V4L2_M2M_SRC].info = ctx->is_enc ? v4l2_fwht_get_pixfmt(0) : &pixfmt_fwht; - ctx->q_data[V4L2_M2M_SRC].width = 1280; - ctx->q_data[V4L2_M2M_SRC].height = 720; + ctx->q_data[V4L2_M2M_SRC].coded_width = 1280; + ctx->q_data[V4L2_M2M_SRC].coded_height = 720; + ctx->q_data[V4L2_M2M_SRC].visible_width = 1280; + ctx->q_data[V4L2_M2M_SRC].visible_height = 720; size = 1280 * 720 * ctx->q_data[V4L2_M2M_SRC].info->sizeimage_mult / ctx->q_data[V4L2_M2M_SRC].info->sizeimage_div; if (ctx->is_enc) @@ -1213,16 +1576,17 @@ static int vicodec_open(struct file *file) else ctx->q_data[V4L2_M2M_SRC].sizeimage = size + sizeof(struct fwht_cframe_hdr); - ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC]; - ctx->q_data[V4L2_M2M_DST].info = - ctx->is_enc ? &pixfmt_fwht : v4l2_fwht_get_pixfmt(0); - size = 1280 * 720 * ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult / - ctx->q_data[V4L2_M2M_DST].info->sizeimage_div; - if (ctx->is_enc) - ctx->q_data[V4L2_M2M_DST].sizeimage = - size + sizeof(struct fwht_cframe_hdr); - else - ctx->q_data[V4L2_M2M_DST].sizeimage = size; + if (ctx->is_enc) { + ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC]; + ctx->q_data[V4L2_M2M_DST].info = &pixfmt_fwht; + ctx->q_data[V4L2_M2M_DST].sizeimage = 1280 * 720 * + ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult / + ctx->q_data[V4L2_M2M_DST].info->sizeimage_div + + sizeof(struct fwht_cframe_hdr); + } else { + ctx->q_data[V4L2_M2M_DST].info = NULL; + } + ctx->state.colorspace = V4L2_COLORSPACE_REC709; if (ctx->is_enc) { @@ -1310,6 +1674,8 @@ static int vicodec_probe(struct platform_device *pdev) #ifdef CONFIG_MEDIA_CONTROLLER dev->mdev.dev = &pdev->dev; strscpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model)); + strscpy(dev->mdev.bus_info, "platform:vicodec", + sizeof(dev->mdev.bus_info)); media_device_init(&dev->mdev); dev->v4l2_dev.mdev = &dev->mdev; #endif diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c index c33900e3c23e..0ba30756e1e4 100644 --- a/drivers/media/platform/video-mux.c +++ b/drivers/media/platform/video-mux.c @@ -263,6 +263,26 @@ static int video_mux_set_format(struct v4l2_subdev *sd, case MEDIA_BUS_FMT_UYYVYY16_0_5X48: case MEDIA_BUS_FMT_JPEG_1X8: case MEDIA_BUS_FMT_AHSV8888_1X32: + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + case MEDIA_BUS_FMT_SBGGR14_1X14: + case MEDIA_BUS_FMT_SGBRG14_1X14: + case MEDIA_BUS_FMT_SGRBG14_1X14: + case MEDIA_BUS_FMT_SRGGB14_1X14: + case MEDIA_BUS_FMT_SBGGR16_1X16: + case MEDIA_BUS_FMT_SGBRG16_1X16: + case MEDIA_BUS_FMT_SGRBG16_1X16: + case MEDIA_BUS_FMT_SRGGB16_1X16: break; default: sdformat->format.code = MEDIA_BUS_FMT_Y8_1X8; diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index 89d9c4c21037..34dcaca45d8b 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * A virtual v4l2-mem2mem example device. 
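
The vim2m changes that follow add several packed RGB formats; the copy_two_pixels() helper further down reads and writes V4L2_PIX_FMT_RGB565 as one 16-bit word with R in bits 15-11, G in bits 10-5 and B in bits 4-0. A standalone sanity check of that packing, simplified (no low-bit fill on unpack) and assuming nothing beyond standard C:

#include <assert.h>
#include <stdint.h>

/* Pack 8-bit R/G/B into one RGB565 word: R 15-11, G 10-5, B 4-0. */
static uint16_t rgb565_pack(uint8_t r, uint8_t g, uint8_t b)
{
	return ((r << 8) & 0xf800) | ((g << 3) & 0x07e0) | (b >> 3);
}

/* Expand back to 8 bits per component (low bits left at zero). */
static void rgb565_unpack(uint16_t pix, uint8_t *r, uint8_t *g, uint8_t *b)
{
	*r = (pix >> 11) << 3;
	*g = ((pix >> 5) & 0x3f) << 2;
	*b = (pix & 0x1f) << 3;
}

int main(void)
{
	uint8_t r, g, b;

	rgb565_unpack(rgb565_pack(0xff, 0x80, 0x10), &r, &g, &b);
	assert(r == 0xf8 && g == 0x80 && b == 0x10);
	return 0;
}
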
* @@ -34,22 +35,34 @@ MODULE_DESCRIPTION("Virtual device for mem2mem framework testing"); MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); MODULE_LICENSE("GPL"); -MODULE_VERSION("0.1.1"); +MODULE_VERSION("0.2"); MODULE_ALIAS("mem2mem_testdev"); -static unsigned debug; +static unsigned int debug; module_param(debug, uint, 0644); -MODULE_PARM_DESC(debug, "activates debug info"); +MODULE_PARM_DESC(debug, "debug level"); + +/* Default transaction time in msec */ +static unsigned int default_transtime = 40; /* Max 25 fps */ +module_param(default_transtime, uint, 0644); +MODULE_PARM_DESC(default_transtime, "default transaction time in ms"); #define MIN_W 32 #define MIN_H 32 #define MAX_W 640 #define MAX_H 480 -#define DIM_ALIGN_MASK 7 /* 8-byte alignment for line length */ + +/* Pixel alignment for non-bayer formats */ +#define WIDTH_ALIGN 2 +#define HEIGHT_ALIGN 1 + +/* Pixel alignment for bayer formats */ +#define BAYER_WIDTH_ALIGN 2 +#define BAYER_HEIGHT_ALIGN 2 /* Flags that indicate a format can be used for capture/output */ -#define MEM2MEM_CAPTURE (1 << 0) -#define MEM2MEM_OUTPUT (1 << 1) +#define MEM2MEM_CAPTURE BIT(0) +#define MEM2MEM_OUTPUT BIT(1) #define MEM2MEM_NAME "vim2m" @@ -58,18 +71,12 @@ MODULE_PARM_DESC(debug, "activates debug info"); /* In bytes, per queue */ #define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024) -/* Default transaction time in msec */ -#define MEM2MEM_DEF_TRANSTIME 40 -#define MEM2MEM_COLOR_STEP (0xff >> 4) -#define MEM2MEM_NUM_TILES 8 - /* Flags that indicate processing mode */ -#define MEM2MEM_HFLIP (1 << 0) -#define MEM2MEM_VFLIP (1 << 1) - -#define dprintk(dev, fmt, arg...) \ - v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) +#define MEM2MEM_HFLIP BIT(0) +#define MEM2MEM_VFLIP BIT(1) +#define dprintk(dev, lvl, fmt, arg...) 
\ + v4l2_dbg(lvl, debug, &(dev)->v4l2_dev, "%s: " fmt, __func__, ## arg) static void vim2m_dev_release(struct device *dev) {} @@ -83,21 +90,46 @@ struct vim2m_fmt { u32 fourcc; int depth; /* Types the format can be used for */ - u32 types; + u32 types; }; static struct vim2m_fmt formats[] = { { - .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */ + .fourcc = V4L2_PIX_FMT_RGB565, /* rrrrrggg gggbbbbb */ .depth = 16, - /* Both capture and output format */ - .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, - }, - { + .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, + }, { + .fourcc = V4L2_PIX_FMT_RGB565X, /* gggbbbbb rrrrrggg */ + .depth = 16, + .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, + }, { + .fourcc = V4L2_PIX_FMT_RGB24, + .depth = 24, + .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, + }, { + .fourcc = V4L2_PIX_FMT_BGR24, + .depth = 24, + .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, + }, { .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, - /* Output-only format */ - .types = MEM2MEM_OUTPUT, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR8, + .depth = 8, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG8, + .depth = 8, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG8, + .depth = 8, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB8, + .depth = 8, + .types = MEM2MEM_CAPTURE, }, }; @@ -120,14 +152,14 @@ enum { #define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000) #define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001) -static struct vim2m_fmt *find_format(struct v4l2_format *f) +static struct vim2m_fmt *find_format(u32 fourcc) { struct vim2m_fmt *fmt; unsigned int k; for (k = 0; k < NUM_FORMATS; k++) { fmt = &formats[k]; - if (fmt->fourcc == f->fmt.pix.pixelformat) + if (fmt->fourcc == fourcc) break; } @@ -137,6 +169,24 @@ static struct vim2m_fmt *find_format(struct v4l2_format *f) return &formats[k]; } +static void get_alignment(u32 fourcc, + unsigned int *walign, unsigned int *halign) +{ + switch (fourcc) { + case V4L2_PIX_FMT_SBGGR8: + case V4L2_PIX_FMT_SGBRG8: + case V4L2_PIX_FMT_SGRBG8: + case V4L2_PIX_FMT_SRGGB8: + *walign = BAYER_WIDTH_ALIGN; + *halign = BAYER_HEIGHT_ALIGN; + return; + default: + *walign = WIDTH_ALIGN; + *halign = HEIGHT_ALIGN; + return; + } +} + struct vim2m_dev { struct v4l2_device v4l2_dev; struct video_device vfd; @@ -146,9 +196,6 @@ struct vim2m_dev { atomic_t num_inst; struct mutex dev_mutex; - spinlock_t irqlock; - - struct delayed_work work_run; struct v4l2_m2m_dev *m2m_dev; }; @@ -167,6 +214,10 @@ struct vim2m_ctx { /* Transaction time (i.e. simulated processing time) in milliseconds */ u32 transtime; + struct mutex vb_mutex; + struct delayed_work work_run; + spinlock_t irqlock; + /* Abort requested by m2m */ int aborting; @@ -188,7 +239,7 @@ static inline struct vim2m_ctx *file2ctx(struct file *file) } static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx, - enum v4l2_buf_type type) + enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: @@ -196,28 +247,221 @@ static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx, case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &ctx->q_data[V4L2_M2M_DST]; default: - BUG(); + return NULL; + } +} + +static const char *type_name(enum v4l2_buf_type type) +{ + switch (type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + return "Output"; + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + return "Capture"; + default: + return "Invalid"; + } +} + +#define CLIP(__color) \ + (u8)(((__color) > 0xff) ? 0xff : (((__color) < 0) ? 
0 : (__color))) + +static void copy_line(struct vim2m_q_data *q_data_out, + u8 *src, u8 *dst, bool reverse) +{ + int x, depth = q_data_out->fmt->depth >> 3; + + if (!reverse) { + memcpy(dst, src, q_data_out->width * depth); + } else { + for (x = 0; x < q_data_out->width >> 1; x++) { + memcpy(dst, src, depth); + memcpy(dst + depth, src - depth, depth); + src -= depth << 1; + dst += depth << 1; + } + return; } - return NULL; } +static void copy_two_pixels(struct vim2m_q_data *q_data_in, + struct vim2m_q_data *q_data_out, + u8 *src[2], u8 **dst, int ypos, bool reverse) +{ + struct vim2m_fmt *out = q_data_out->fmt; + struct vim2m_fmt *in = q_data_in->fmt; + u8 _r[2], _g[2], _b[2], *r, *g, *b; + int i; + + /* Step 1: read two consecutive pixels from src pointer */ + + r = _r; + g = _g; + b = _b; + + switch (in->fourcc) { + case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */ + for (i = 0; i < 2; i++) { + u16 pix = *(u16 *)(src[i]); + + *r++ = (u8)(((pix & 0xf800) >> 11) << 3) | 0x07; + *g++ = (u8)((((pix & 0x07e0) >> 5)) << 2) | 0x03; + *b++ = (u8)((pix & 0x1f) << 3) | 0x07; + } + break; + case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */ + for (i = 0; i < 2; i++) { + u16 pix = *(u16 *)(src[i]); + + *r++ = (u8)(((0x00f8 & pix) >> 3) << 3) | 0x07; + *g++ = (u8)(((pix & 0x7) << 2) | + ((pix & 0xe000) >> 5)) | 0x03; + *b++ = (u8)(((pix & 0x1f00) >> 8) << 3) | 0x07; + } + break; + default: + case V4L2_PIX_FMT_RGB24: + for (i = 0; i < 2; i++) { + *r++ = src[i][0]; + *g++ = src[i][1]; + *b++ = src[i][2]; + } + break; + case V4L2_PIX_FMT_BGR24: + for (i = 0; i < 2; i++) { + *b++ = src[i][0]; + *g++ = src[i][1]; + *r++ = src[i][2]; + } + break; + } + + /* Step 2: store two consecutive points, reversing them if needed */ + + r = _r; + g = _g; + b = _b; + + switch (out->fourcc) { + case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */ + for (i = 0; i < 2; i++) { + u16 *pix = (u16 *)*dst; + + *pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) | + (*b >> 3); + + *dst += 2; + } + return; + case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */ + for (i = 0; i < 2; i++) { + u16 *pix = (u16 *)*dst; + u8 green = *g++ >> 2; + + *pix = ((green << 8) & 0xe000) | (green & 0x07) | + ((*b++ << 5) & 0x1f00) | ((*r++ & 0xf8)); + + *dst += 2; + } + return; + case V4L2_PIX_FMT_RGB24: + for (i = 0; i < 2; i++) { + *(*dst)++ = *r++; + *(*dst)++ = *g++; + *(*dst)++ = *b++; + } + return; + case V4L2_PIX_FMT_BGR24: + for (i = 0; i < 2; i++) { + *(*dst)++ = *b++; + *(*dst)++ = *g++; + *(*dst)++ = *r++; + } + return; + case V4L2_PIX_FMT_YUYV: + default: + { + u8 y, y1, u, v; + + y = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b) + + 524288) >> 15); + u = ((-4878 * (*r) - 9578 * (*g) + 14456 * (*b) + + 4210688) >> 15); + v = ((14456 * (*r++) - 12105 * (*g++) - 2351 * (*b++) + + 4210688) >> 15); + y1 = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b) + + 524288) >> 15); + + *(*dst)++ = y; + *(*dst)++ = u; + + *(*dst)++ = y1; + *(*dst)++ = v; + return; + } + case V4L2_PIX_FMT_SBGGR8: + if (!(ypos & 1)) { + *(*dst)++ = *b; + *(*dst)++ = *++g; + } else { + *(*dst)++ = *g; + *(*dst)++ = *++r; + } + return; + case V4L2_PIX_FMT_SGBRG8: + if (!(ypos & 1)) { + *(*dst)++ = *g; + *(*dst)++ = *++b; + } else { + *(*dst)++ = *r; + *(*dst)++ = *++g; + } + return; + case V4L2_PIX_FMT_SGRBG8: + if (!(ypos & 1)) { + *(*dst)++ = *g; + *(*dst)++ = *++r; + } else { + *(*dst)++ = *b; + *(*dst)++ = *++g; + } + return; + case V4L2_PIX_FMT_SRGGB8: + if (!(ypos & 1)) { + *(*dst)++ = *r; + *(*dst)++ = *++g; + } else { + *(*dst)++ = *g; + *(*dst)++ = *++b; + } + return; + } 
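
The YUYV case above converts RGB to Y/Cb/Cr with 15-bit fixed-point, BT.601-style coefficients; the additive constant 524288 is 16 << 15, i.e. the limited-range black level pre-scaled. A small standalone check of the luma expression, reusing the same constants:

#include <assert.h>
#include <stdint.h>

/* Same 15-bit fixed-point luma expression as the YUYV case above:
 * roughly Y = 0.258*R + 0.506*G + 0.098*B + 16, scaled by 32768. */
static uint8_t rgb_to_y(uint8_t r, uint8_t g, uint8_t b)
{
	return (8453 * r + 16594 * g + 3223 * b + 524288) >> 15;
}

int main(void)
{
	assert(rgb_to_y(0, 0, 0) == 16);	/* limited-range black */
	assert(rgb_to_y(255, 255, 255) == 235);	/* limited-range white */
	return 0;
}
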
+} static int device_process(struct vim2m_ctx *ctx, struct vb2_v4l2_buffer *in_vb, struct vb2_v4l2_buffer *out_vb) { struct vim2m_dev *dev = ctx->dev; - struct vim2m_q_data *q_data; - u8 *p_in, *p_out; - int x, y, t, w; - int tile_w, bytes_left; - int width, height, bytesperline; + struct vim2m_q_data *q_data_in, *q_data_out; + u8 *p_in, *p_line, *p_in_x[2], *p, *p_out; + unsigned int width, height, bytesperline, bytes_per_pixel; + unsigned int x, y, y_in, y_out, x_int, x_fract, x_err, x_offset; + int start, end, step; + + q_data_in = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); + if (!q_data_in) + return 0; + bytesperline = (q_data_in->width * q_data_in->fmt->depth) >> 3; + bytes_per_pixel = q_data_in->fmt->depth >> 3; - q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); + q_data_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!q_data_out) + return 0; - width = q_data->width; - height = q_data->height; - bytesperline = (q_data->width * q_data->fmt->depth) >> 3; + /* As we're doing scaling, use the output dimensions here */ + height = q_data_out->height; + width = q_data_out->width; p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0); p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0); @@ -227,109 +471,86 @@ static int device_process(struct vim2m_ctx *ctx, return -EFAULT; } - if (vb2_plane_size(&in_vb->vb2_buf, 0) > - vb2_plane_size(&out_vb->vb2_buf, 0)) { - v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); - return -EINVAL; - } + out_vb->sequence = q_data_out->sequence++; + in_vb->sequence = q_data_in->sequence++; + v4l2_m2m_buf_copy_metadata(in_vb, out_vb, true); - tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3)) - / MEM2MEM_NUM_TILES; - bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES; - w = 0; - - out_vb->sequence = - get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++; - in_vb->sequence = q_data->sequence++; - out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp; - - if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE) - out_vb->timecode = in_vb->timecode; - out_vb->field = in_vb->field; - out_vb->flags = in_vb->flags & - (V4L2_BUF_FLAG_TIMECODE | - V4L2_BUF_FLAG_KEYFRAME | - V4L2_BUF_FLAG_PFRAME | - V4L2_BUF_FLAG_BFRAME | - V4L2_BUF_FLAG_TSTAMP_SRC_MASK); - - switch (ctx->mode) { - case MEM2MEM_HFLIP | MEM2MEM_VFLIP: - p_out += bytesperline * height - bytes_left; - for (y = 0; y < height; ++y) { - for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { - if (w & 0x1) { - for (x = 0; x < tile_w; ++x) - *--p_out = *p_in++ + - MEM2MEM_COLOR_STEP; - } else { - for (x = 0; x < tile_w; ++x) - *--p_out = *p_in++ - - MEM2MEM_COLOR_STEP; - } - ++w; - } - p_in += bytes_left; - p_out -= bytes_left; - } - break; + if (ctx->mode & MEM2MEM_VFLIP) { + start = height - 1; + end = -1; + step = -1; + } else { + start = 0; + end = height; + step = 1; + } + y_out = 0; + + /* + * When format and resolution are identical, + * we can use a faster copy logic + */ + if (q_data_in->fmt->fourcc == q_data_out->fmt->fourcc && + q_data_in->width == q_data_out->width && + q_data_in->height == q_data_out->height) { + for (y = start; y != end; y += step, y_out++) { + p = p_in + (y * bytesperline); + if (ctx->mode & MEM2MEM_HFLIP) + p += bytesperline - (q_data_in->fmt->depth >> 3); + + copy_line(q_data_out, p, p_out, + ctx->mode & MEM2MEM_HFLIP); - case MEM2MEM_HFLIP: - for (y = 0; y < height; ++y) { - p_out += MEM2MEM_NUM_TILES * tile_w; - for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { - if (w & 0x01) { - for (x = 0; x < tile_w; ++x) - *--p_out = *p_in++ + - MEM2MEM_COLOR_STEP; - } else { - for (x = 0; x < tile_w; ++x) 
- *--p_out = *p_in++ - - MEM2MEM_COLOR_STEP; - } - ++w; - } - p_in += bytes_left; p_out += bytesperline; } - break; + return 0; + } + + /* Slower algorithm with format conversion, hflip, vflip and scaler */ + + /* To speed scaler up, use Bresenham for X dimension */ + x_int = q_data_in->width / q_data_out->width; + x_fract = q_data_in->width % q_data_out->width; + + for (y = start; y != end; y += step, y_out++) { + y_in = (y * q_data_in->height) / q_data_out->height; + x_offset = 0; + x_err = 0; - case MEM2MEM_VFLIP: - p_out += bytesperline * (height - 1); - for (y = 0; y < height; ++y) { - for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { - if (w & 0x1) { - for (x = 0; x < tile_w; ++x) - *p_out++ = *p_in++ + - MEM2MEM_COLOR_STEP; - } else { - for (x = 0; x < tile_w; ++x) - *p_out++ = *p_in++ - - MEM2MEM_COLOR_STEP; - } - ++w; + p_line = p_in + (y_in * bytesperline); + if (ctx->mode & MEM2MEM_HFLIP) + p_line += bytesperline - (q_data_in->fmt->depth >> 3); + p_in_x[0] = p_line; + + for (x = 0; x < width >> 1; x++) { + x_offset += x_int; + x_err += x_fract; + if (x_err > width) { + x_offset++; + x_err -= width; } - p_in += bytes_left; - p_out += bytes_left - 2 * bytesperline; - } - break; - default: - for (y = 0; y < height; ++y) { - for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { - if (w & 0x1) { - for (x = 0; x < tile_w; ++x) - *p_out++ = *p_in++ + - MEM2MEM_COLOR_STEP; - } else { - for (x = 0; x < tile_w; ++x) - *p_out++ = *p_in++ - - MEM2MEM_COLOR_STEP; - } - ++w; + if (ctx->mode & MEM2MEM_HFLIP) + p_in_x[1] = p_line - x_offset * bytes_per_pixel; + else + p_in_x[1] = p_line + x_offset * bytes_per_pixel; + + copy_two_pixels(q_data_in, q_data_out, + p_in_x, &p_out, y_out, + ctx->mode & MEM2MEM_HFLIP); + + /* Calculate the next p_in_x0 */ + x_offset += x_int; + x_err += x_fract; + if (x_err > width) { + x_offset++; + x_err -= width; } - p_in += bytes_left; - p_out += bytes_left; + + if (ctx->mode & MEM2MEM_HFLIP) + p_in_x[0] = p_line - x_offset * bytes_per_pixel; + else + p_in_x[0] = p_line + x_offset * bytes_per_pixel; } } @@ -349,7 +570,7 @@ static int job_ready(void *priv) if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) { - dprintk(ctx->dev, "Not enough buffers available\n"); + dprintk(ctx->dev, 1, "Not enough buffers available\n"); return 0; } @@ -373,7 +594,6 @@ static void job_abort(void *priv) static void device_run(void *priv) { struct vim2m_ctx *ctx = priv; - struct vim2m_dev *dev = ctx->dev; struct vb2_v4l2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); @@ -390,37 +610,38 @@ static void device_run(void *priv) &ctx->hdl); /* Run delayed work, which simulates a hardware irq */ - schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime)); + schedule_delayed_work(&ctx->work_run, msecs_to_jiffies(ctx->transtime)); } static void device_work(struct work_struct *w) { - struct vim2m_dev *vim2m_dev = - container_of(w, struct vim2m_dev, work_run.work); struct vim2m_ctx *curr_ctx; + struct vim2m_dev *vim2m_dev; struct vb2_v4l2_buffer *src_vb, *dst_vb; unsigned long flags; - curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev); + curr_ctx = container_of(w, struct vim2m_ctx, work_run.work); - if (NULL == curr_ctx) { + if (!curr_ctx) { pr_err("Instance released before the end of transaction\n"); return; } + vim2m_dev = curr_ctx->dev; + src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx); curr_ctx->num_processed++; - 
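
The new device_process() above avoids a division per output pixel by stepping the source offset with an integer part plus an accumulated remainder ("Bresenham for X"). A standalone sketch of that stepping, reduced to one source index per output pixel (the driver walks two at a time) and with the function and array names invented for illustration:

#include <assert.h>

/* Map each output x to a source column using integer step + error term. */
static void map_row(unsigned int in_w, unsigned int out_w,
		    unsigned int *src_idx)
{
	unsigned int x_int = in_w / out_w;
	unsigned int x_fract = in_w % out_w;
	unsigned int off = 0, err = 0, x;

	for (x = 0; x < out_w; x++) {
		src_idx[x] = off;
		off += x_int;
		err += x_fract;
		if (err >= out_w) {
			off++;
			err -= out_w;
		}
	}
}

int main(void)
{
	unsigned int idx[4];

	map_row(8, 4, idx);		/* 2:1 downscale */
	assert(idx[0] == 0 && idx[1] == 2 && idx[2] == 4 && idx[3] == 6);

	map_row(3, 4, idx);		/* 4:3 upscale */
	assert(idx[3] < 3);		/* never reads past the input row */
	return 0;
}
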
spin_lock_irqsave(&vim2m_dev->irqlock, flags); + spin_lock_irqsave(&curr_ctx->irqlock, flags); v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); - spin_unlock_irqrestore(&vim2m_dev->irqlock, flags); + spin_unlock_irqrestore(&curr_ctx->irqlock, flags); if (curr_ctx->num_processed == curr_ctx->translen || curr_ctx->aborting) { - dprintk(curr_ctx->dev, "Finishing transaction\n"); + dprintk(curr_ctx->dev, 2, "Finishing capture buffer fill\n"); curr_ctx->num_processed = 0; v4l2_m2m_job_finish(vim2m_dev->m2m_dev, curr_ctx->fh.m2m_ctx); } else { @@ -437,7 +658,7 @@ static int vidioc_querycap(struct file *file, void *priv, strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); snprintf(cap->bus_info, sizeof(cap->bus_info), - "platform:%s", MEM2MEM_NAME); + "platform:%s", MEM2MEM_NAME); return 0; } @@ -453,8 +674,10 @@ static int enum_fmt(struct v4l2_fmtdesc *f, u32 type) /* index-th format of type type found ? */ if (num == f->index) break; - /* Correct type but haven't reached our index yet, - * just increment per-type index */ + /* + * Correct type but haven't reached our index yet, + * just increment per-type index + */ ++num; } } @@ -482,6 +705,27 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, return enum_fmt(f, MEM2MEM_OUTPUT); } +static int vidioc_enum_framesizes(struct file *file, void *priv, + struct v4l2_frmsizeenum *fsize) +{ + if (fsize->index != 0) + return -EINVAL; + + if (!find_format(fsize->pixel_format)) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = MIN_W; + fsize->stepwise.min_height = MIN_H; + fsize->stepwise.max_width = MAX_W; + fsize->stepwise.max_height = MAX_H; + + get_alignment(fsize->pixel_format, + &fsize->stepwise.step_width, + &fsize->stepwise.step_height); + return 0; +} + static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) { struct vb2_queue *vq; @@ -492,6 +736,8 @@ static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) return -EINVAL; q_data = get_q_data(ctx, f->type); + if (!q_data) + return -EINVAL; f->fmt.pix.width = q_data->width; f->fmt.pix.height = q_data->height; @@ -521,8 +767,11 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt) { - /* V4L2 specification suggests the driver corrects the format struct - * if any of the dimensions is unsupported */ + int walign, halign; + /* + * V4L2 specification specifies the driver corrects the + * format struct if any of the dimensions is unsupported + */ if (f->fmt.pix.height < MIN_H) f->fmt.pix.height = MIN_H; else if (f->fmt.pix.height > MAX_H) @@ -533,7 +782,9 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt) else if (f->fmt.pix.width > MAX_W) f->fmt.pix.width = MAX_W; - f->fmt.pix.width &= ~DIM_ALIGN_MASK; + get_alignment(f->fmt.pix.pixelformat, &walign, &halign); + f->fmt.pix.width &= ~(walign - 1); + f->fmt.pix.height &= ~(halign - 1); f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; f->fmt.pix.field = V4L2_FIELD_NONE; @@ -547,10 +798,10 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct vim2m_fmt *fmt; struct vim2m_ctx *ctx = file2ctx(file); - fmt = find_format(f); + fmt = find_format(f->fmt.pix.pixelformat); if (!fmt) { f->fmt.pix.pixelformat = formats[0].fourcc; - fmt = find_format(f); + fmt = 
find_format(f->fmt.pix.pixelformat); } if (!(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->dev->v4l2_dev, @@ -572,10 +823,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct vim2m_fmt *fmt; struct vim2m_ctx *ctx = file2ctx(file); - fmt = find_format(f); + fmt = find_format(f->fmt.pix.pixelformat); if (!fmt) { f->fmt.pix.pixelformat = formats[0].fourcc; - fmt = find_format(f); + fmt = find_format(f->fmt.pix.pixelformat); } if (!(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->dev->v4l2_dev, @@ -607,15 +858,20 @@ static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) return -EBUSY; } - q_data->fmt = find_format(f); + q_data->fmt = find_format(f->fmt.pix.pixelformat); q_data->width = f->fmt.pix.width; q_data->height = f->fmt.pix.height; q_data->sizeimage = q_data->width * q_data->height * q_data->fmt->depth >> 3; - dprintk(ctx->dev, - "Setting format for type %d, wxh: %dx%d, fmt: %d\n", - f->type, q_data->width, q_data->height, q_data->fmt->fourcc); + dprintk(ctx->dev, 1, + "Format for type %s: %dx%d (%d bpp), fmt: %c%c%c%c\n", + type_name(f->type), q_data->width, q_data->height, + q_data->fmt->depth, + (q_data->fmt->fourcc & 0xff), + (q_data->fmt->fourcc >> 8) & 0xff, + (q_data->fmt->fourcc >> 16) & 0xff, + (q_data->fmt->fourcc >> 24) & 0xff); return 0; } @@ -674,6 +930,8 @@ static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_TRANS_TIME_MSEC: ctx->transtime = ctrl->val; + if (ctx->transtime < 1) + ctx->transtime = 1; break; case V4L2_CID_TRANS_NUM_BUFS: @@ -692,11 +950,11 @@ static const struct v4l2_ctrl_ops vim2m_ctrl_ops = { .s_ctrl = vim2m_s_ctrl, }; - static const struct v4l2_ioctl_ops vim2m_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, + .vidioc_enum_framesizes = vidioc_enum_framesizes, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, @@ -721,20 +979,23 @@ static const struct v4l2_ioctl_ops vim2m_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; - /* * Queue operations */ static int vim2m_queue_setup(struct vb2_queue *vq, - unsigned int *nbuffers, unsigned int *nplanes, - unsigned int sizes[], struct device *alloc_devs[]) + unsigned int *nbuffers, + unsigned int *nplanes, + unsigned int sizes[], + struct device *alloc_devs[]) { struct vim2m_ctx *ctx = vb2_get_drv_priv(vq); struct vim2m_q_data *q_data; unsigned int size, count = *nbuffers; q_data = get_q_data(ctx, vq->type); + if (!q_data) + return -EINVAL; size = q_data->width * q_data->height * q_data->fmt->depth >> 3; @@ -748,33 +1009,42 @@ static int vim2m_queue_setup(struct vb2_queue *vq, *nplanes = 1; sizes[0] = size; - dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size); + dprintk(ctx->dev, 1, "%s: get %d buffer(s) of size %d each.\n", + type_name(vq->type), count, size); return 0; } -static int vim2m_buf_prepare(struct vb2_buffer *vb) +static int vim2m_buf_out_validate(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + + if (vbuf->field == V4L2_FIELD_ANY) + vbuf->field = V4L2_FIELD_NONE; + if (vbuf->field != V4L2_FIELD_NONE) { + dprintk(ctx->dev, 1, "%s field isn't supported\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int vim2m_buf_prepare(struct vb2_buffer *vb) +{ + struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct vim2m_q_data *q_data; - dprintk(ctx->dev, "type: %d\n", 
vb->vb2_queue->type); + dprintk(ctx->dev, 2, "type: %s\n", type_name(vb->vb2_queue->type)); q_data = get_q_data(ctx, vb->vb2_queue->type); - if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { - if (vbuf->field == V4L2_FIELD_ANY) - vbuf->field = V4L2_FIELD_NONE; - if (vbuf->field != V4L2_FIELD_NONE) { - dprintk(ctx->dev, "%s field isn't supported\n", - __func__); - return -EINVAL; - } - } - + if (!q_data) + return -EINVAL; if (vb2_plane_size(vb, 0) < q_data->sizeimage) { - dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n", - __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage); + dprintk(ctx->dev, 1, + "%s data will not fit into plane (%lu < %lu)\n", + __func__, vb2_plane_size(vb, 0), + (long)q_data->sizeimage); return -EINVAL; } @@ -791,11 +1061,14 @@ static void vim2m_buf_queue(struct vb2_buffer *vb) v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } -static int vim2m_start_streaming(struct vb2_queue *q, unsigned count) +static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count) { struct vim2m_ctx *ctx = vb2_get_drv_priv(q); struct vim2m_q_data *q_data = get_q_data(ctx, q->type); + if (!q_data) + return -EINVAL; + q_data->sequence = 0; return 0; } @@ -803,25 +1076,23 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count) static void vim2m_stop_streaming(struct vb2_queue *q) { struct vim2m_ctx *ctx = vb2_get_drv_priv(q); - struct vim2m_dev *dev = ctx->dev; struct vb2_v4l2_buffer *vbuf; unsigned long flags; - if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) - cancel_delayed_work_sync(&dev->work_run); + cancel_delayed_work_sync(&ctx->work_run); for (;;) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); else vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - if (vbuf == NULL) + if (!vbuf) return; v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, &ctx->hdl); - spin_lock_irqsave(&ctx->dev->irqlock, flags); + spin_lock_irqsave(&ctx->irqlock, flags); v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); - spin_unlock_irqrestore(&ctx->dev->irqlock, flags); + spin_unlock_irqrestore(&ctx->irqlock, flags); } } @@ -834,6 +1105,7 @@ static void vim2m_buf_request_complete(struct vb2_buffer *vb) static const struct vb2_ops vim2m_qops = { .queue_setup = vim2m_queue_setup, + .buf_out_validate = vim2m_buf_out_validate, .buf_prepare = vim2m_buf_prepare, .buf_queue = vim2m_buf_queue, .start_streaming = vim2m_start_streaming, @@ -843,7 +1115,8 @@ static const struct vb2_ops vim2m_qops = { .buf_request_complete = vim2m_buf_request_complete, }; -static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) +static int queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) { struct vim2m_ctx *ctx = priv; int ret; @@ -855,7 +1128,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds src_vq->ops = &vim2m_qops; src_vq->mem_ops = &vb2_vmalloc_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; - src_vq->lock = &ctx->dev->dev_mutex; + src_vq->lock = &ctx->vb_mutex; src_vq->supports_requests = true; ret = vb2_queue_init(src_vq); @@ -869,17 +1142,16 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds dst_vq->ops = &vim2m_qops; dst_vq->mem_ops = &vb2_vmalloc_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; - dst_vq->lock = &ctx->dev->dev_mutex; + dst_vq->lock = &ctx->vb_mutex; return vb2_queue_init(dst_vq); } -static const struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = { +static struct v4l2_ctrl_config 
vim2m_ctrl_trans_time_msec = { .ops = &vim2m_ctrl_ops, .id = V4L2_CID_TRANS_TIME_MSEC, .name = "Transaction Time (msec)", .type = V4L2_CTRL_TYPE_INTEGER, - .def = MEM2MEM_DEF_TRANSTIME, .min = 1, .max = 10001, .step = 1, @@ -921,6 +1193,8 @@ static int vim2m_open(struct file *file) v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); + + vim2m_ctrl_trans_time_msec.def = default_transtime; v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_time_msec, NULL); v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_num_bufs, NULL); if (hdl->error) { @@ -944,6 +1218,10 @@ static int vim2m_open(struct file *file) ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); + mutex_init(&ctx->vb_mutex); + spin_lock_init(&ctx->irqlock); + INIT_DELAYED_WORK(&ctx->work_run, device_work); + if (IS_ERR(ctx->fh.m2m_ctx)) { rc = PTR_ERR(ctx->fh.m2m_ctx); @@ -956,7 +1234,7 @@ static int vim2m_open(struct file *file) v4l2_fh_add(&ctx->fh); atomic_inc(&dev->num_inst); - dprintk(dev, "Created instance: %p, m2m_ctx: %p\n", + dprintk(dev, 1, "Created instance: %p, m2m_ctx: %p\n", ctx, ctx->fh.m2m_ctx); open_unlock: @@ -969,7 +1247,7 @@ static int vim2m_release(struct file *file) struct vim2m_dev *dev = video_drvdata(file); struct vim2m_ctx *ctx = file2ctx(file); - dprintk(dev, "Releasing instance %p\n", ctx); + dprintk(dev, 1, "Releasing instance %p\n", ctx); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); @@ -1024,8 +1302,6 @@ static int vim2m_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; - spin_lock_init(&dev->irqlock); - ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) return ret; @@ -1037,7 +1313,6 @@ static int vim2m_probe(struct platform_device *pdev) vfd = &dev->vfd; vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; - INIT_DELAYED_WORK(&dev->work_run, device_work); ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { @@ -1047,7 +1322,7 @@ static int vim2m_probe(struct platform_device *pdev) video_set_drvdata(vfd, dev); v4l2_info(&dev->v4l2_dev, - "Device registered as /dev/video%d\n", vfd->num); + "Device registered as /dev/video%d\n", vfd->num); platform_set_drvdata(pdev, dev); @@ -1061,12 +1336,14 @@ static int vim2m_probe(struct platform_device *pdev) #ifdef CONFIG_MEDIA_CONTROLLER dev->mdev.dev = &pdev->dev; strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model)); + strscpy(dev->mdev.bus_info, "platform:vim2m", + sizeof(dev->mdev.bus_info)); media_device_init(&dev->mdev); dev->mdev.ops = &m2m_media_ops; dev->v4l2_dev.mdev = &dev->mdev; - ret = v4l2_m2m_register_media_controller(dev->m2m_dev, - vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER); + ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd, + MEDIA_ENT_F_PROC_VIDEO_SCALER); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n"); goto unreg_m2m; diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile index 4b2e3de7856e..c4fc8e7d365a 100644 --- a/drivers/media/platform/vimc/Makefile +++ b/drivers/media/platform/vimc/Makefile @@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o vimc_debayer-objs := vimc-debayer.o vimc_scaler-objs := vimc-scaler.o vimc_sensor-objs := vimc-sensor.o +vimc_streamer-objs := vimc-streamer.o obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \ - vimc_scaler.o vimc_sensor.o + vimc_scaler.o vimc_sensor.o vimc_streamer.o diff --git a/drivers/media/platform/vimc/vimc-capture.c 
b/drivers/media/platform/vimc/vimc-capture.c index 3f7e9ed56633..ea869631a3f6 100644 --- a/drivers/media/platform/vimc/vimc-capture.c +++ b/drivers/media/platform/vimc/vimc-capture.c @@ -24,6 +24,7 @@ #include <media/videobuf2-vmalloc.h> #include "vimc-common.h" +#include "vimc-streamer.h" #define VIMC_CAP_DRV_NAME "vimc-capture" @@ -44,7 +45,7 @@ struct vimc_cap_device { spinlock_t qlock; struct mutex lock; u32 sequence; - struct media_pipeline pipe; + struct vimc_stream stream; }; static const struct v4l2_pix_format fmt_default = { @@ -69,12 +70,10 @@ struct vimc_cap_buffer { static int vimc_cap_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { - struct vimc_cap_device *vcap = video_drvdata(file); - - strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver)); + strscpy(cap->driver, VIMC_PDEV_NAME, sizeof(cap->driver)); strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), - "platform:%s", vcap->vdev.v4l2_dev->name); + "platform:%s", VIMC_PDEV_NAME); return 0; } @@ -248,14 +247,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count) vcap->sequence = 0; /* Start the media pipeline */ - ret = media_pipeline_start(entity, &vcap->pipe); + ret = media_pipeline_start(entity, &vcap->stream.pipe); if (ret) { vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED); return ret; } - /* Enable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1); + ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1); if (ret) { media_pipeline_stop(entity); vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED); @@ -273,8 +271,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq) { struct vimc_cap_device *vcap = vb2_get_drv_priv(vq); - /* Disable streaming from the pipe */ - vimc_pipeline_s_stream(&vcap->vdev.entity, 0); + vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0); /* Stop the media pipeline */ media_pipeline_stop(&vcap->vdev.entity); @@ -355,8 +352,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master, kfree(vcap); } -static void vimc_cap_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, const void *frame) +static void *vimc_cap_process_frame(struct vimc_ent_device *ved, + const void *frame) { struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device, ved); @@ -370,7 +367,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved, typeof(*vimc_buf), list); if (!vimc_buf) { spin_unlock(&vcap->qlock); - return; + return ERR_PTR(-EAGAIN); } /* Remove this entry from the list */ @@ -391,6 +388,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved, vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0, vcap->format.sizeimage); vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE); + return NULL; } static int vimc_cap_comp_bind(struct device *comp, struct device *master, @@ -431,7 +429,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master, /* Initialize the vb2 queue */ q = &vcap->queue; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - q->io_modes = VB2_MMAP | VB2_DMABUF; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR; q->drv_priv = vcap; q->buf_struct_size = sizeof(struct vimc_cap_buffer); q->ops = &vimc_cap_qops; diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c index 867e24dbd6b5..c1a74bb2df58 100644 --- a/drivers/media/platform/vimc/vimc-common.c +++ b/drivers/media/platform/vimc/vimc-common.c @@ -207,41 +207,6 @@ 
const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat) } EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat); -int vimc_propagate_frame(struct media_pad *src, const void *frame) -{ - struct media_link *link; - - if (!(src->flags & MEDIA_PAD_FL_SOURCE)) - return -EINVAL; - - /* Send this frame to all sink pads that are direct linked */ - list_for_each_entry(link, &src->entity->links, list) { - if (link->source == src && - (link->flags & MEDIA_LNK_FL_ENABLED)) { - struct vimc_ent_device *ved = NULL; - struct media_entity *entity = link->sink->entity; - - if (is_media_entity_v4l2_subdev(entity)) { - struct v4l2_subdev *sd = - container_of(entity, struct v4l2_subdev, - entity); - ved = v4l2_get_subdevdata(sd); - } else if (is_media_entity_v4l2_video_device(entity)) { - struct video_device *vdev = - container_of(entity, - struct video_device, - entity); - ved = video_get_drvdata(vdev); - } - if (ved && ved->process_frame) - ved->process_frame(ved, link->sink, frame); - } - } - - return 0; -} -EXPORT_SYMBOL_GPL(vimc_propagate_frame); - /* Helper function to allocate and initialize pads */ struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag) { diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h index 2e9981b18166..84539430b5e7 100644 --- a/drivers/media/platform/vimc/vimc-common.h +++ b/drivers/media/platform/vimc/vimc-common.h @@ -22,6 +22,8 @@ #include <media/media-device.h> #include <media/v4l2-device.h> +#define VIMC_PDEV_NAME "vimc" + /* VIMC-specific controls */ #define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000) #define VIMC_CID_VIMC_CLASS (0x00f00000 | 1) @@ -113,24 +115,13 @@ struct vimc_pix_map { struct vimc_ent_device { struct media_entity *ent; struct media_pad *pads; - void (*process_frame)(struct vimc_ent_device *ved, - struct media_pad *sink, const void *frame); + void * (*process_frame)(struct vimc_ent_device *ved, + const void *frame); void (*vdev_get_format)(struct vimc_ent_device *ved, struct v4l2_pix_format *fmt); }; /** - * vimc_propagate_frame - propagate a frame through the topology - * - * @src: the source pad where the frame is being originated - * @frame: the frame to be propagated - * - * This function will call the process_frame callback from the vimc_ent_device - * struct of the nodes directly connected to the @src pad - */ -int vimc_propagate_frame(struct media_pad *src, const void *frame); - -/** * vimc_pads_init - initialize pads * * @num_pads: number of pads to initialize diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c index ce809d2e3d53..0fbb7914098f 100644 --- a/drivers/media/platform/vimc/vimc-core.c +++ b/drivers/media/platform/vimc/vimc-core.c @@ -24,7 +24,6 @@ #include "vimc-common.h" -#define VIMC_PDEV_NAME "vimc" #define VIMC_MDEV_MODEL_NAME "VIMC MDEV" #define VIMC_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \ @@ -221,6 +220,7 @@ static int vimc_comp_bind(struct device *master) err_mdev_unregister: media_device_unregister(&vimc->mdev); + media_device_cleanup(&vimc->mdev); err_comp_unbind_all: component_unbind_all(master, NULL); err_v4l2_unregister: @@ -237,6 +237,7 @@ static void vimc_comp_unbind(struct device *master) dev_dbg(master, "unbind"); media_device_unregister(&vimc->mdev); + media_device_cleanup(&vimc->mdev); component_unbind_all(master, NULL); v4l2_device_unregister(&vimc->v4l2_dev); } @@ -319,6 +320,8 @@ static int vimc_probe(struct platform_device *pdev) /* Initialize media device */ strscpy(vimc->mdev.model, 
VIMC_MDEV_MODEL_NAME, sizeof(vimc->mdev.model)); + snprintf(vimc->mdev.bus_info, sizeof(vimc->mdev.bus_info), + "platform:%s", VIMC_PDEV_NAME); vimc->mdev.dev = &pdev->dev; media_device_init(&vimc->mdev); diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c index 77887f66f323..7d77c63b99d2 100644 --- a/drivers/media/platform/vimc/vimc-debayer.c +++ b/drivers/media/platform/vimc/vimc-debayer.c @@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb, static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable) if (!vdeb->src_frame) return -ENOMEM; - /* Turn the stream on in the subdevices directly connected */ - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1); - if (ret) { - vfree(vdeb->src_frame); - vdeb->src_frame = NULL; - return ret; - } } else { if (!vdeb->src_frame) return 0; - /* Disable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0); - if (ret) - return ret; - vfree(vdeb->src_frame); vdeb->src_frame = NULL; } @@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb, } } -static void vimc_deb_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, - const void *sink_frame) +static void *vimc_deb_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device, ved); @@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved, /* If the stream in this node is not active, just return */ if (!vdeb->src_frame) - return; + return ERR_PTR(-EINVAL); for (i = 0; i < vdeb->sink_fmt.height; i++) for (j = 0; j < vdeb->sink_fmt.width; j++) { @@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved, vdeb->set_rgb_src(vdeb, i, j, rgb); } - /* Propagate the frame through all source pads */ - for (i = 1; i < vdeb->sd.entity.num_pads; i++) { - struct media_pad *pad = &vdeb->sd.entity.pads[i]; + return vdeb->src_frame; - vimc_propagate_frame(pad, vdeb->src_frame); - } } static void vimc_deb_comp_unbind(struct device *comp, struct device *master, diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c index b0952ee86296..39b2a73dfcc1 100644 --- a/drivers/media/platform/vimc/vimc-scaler.c +++ b/drivers/media/platform/vimc/vimc-scaler.c @@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = { static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable) if (!vsca->src_frame) return -ENOMEM; - /* Turn the stream on in the subdevices directly connected */ - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1); - if (ret) { - vfree(vsca->src_frame); - vsca->src_frame = NULL; - return ret; - } } else { if (!vsca->src_frame) return 0; - /* Disable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0); - if (ret) - return ret; - vfree(vsca->src_frame); vsca->src_frame = NULL; } @@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca, vimc_sca_scale_pix(vsca, i, j, sink_frame); } -static 
void vimc_sca_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, - const void *sink_frame) +static void *vimc_sca_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device, ved); - unsigned int i; /* If the stream in this node is not active, just return */ if (!vsca->src_frame) - return; + return ERR_PTR(-EINVAL); vimc_sca_fill_src_frame(vsca, sink_frame); - /* Propagate the frame through all source pads */ - for (i = 1; i < vsca->sd.entity.num_pads; i++) { - struct media_pad *pad = &vsca->sd.entity.pads[i]; - - vimc_propagate_frame(pad, vsca->src_frame); - } + return vsca->src_frame; }; static void vimc_sca_comp_unbind(struct device *comp, struct device *master, diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c index 32ca9c6172b1..59195f262623 100644 --- a/drivers/media/platform/vimc/vimc-sensor.c +++ b/drivers/media/platform/vimc/vimc-sensor.c @@ -16,8 +16,6 @@ */ #include <linux/component.h> -#include <linux/freezer.h> -#include <linux/kthread.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> @@ -201,38 +199,20 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = { .set_fmt = vimc_sen_set_fmt, }; -static int vimc_sen_tpg_thread(void *data) +static void *vimc_sen_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { - struct vimc_sen_device *vsen = data; - unsigned int i; - - set_freezable(); - set_current_state(TASK_UNINTERRUPTIBLE); - - for (;;) { - try_to_freeze(); - if (kthread_should_stop()) - break; + struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device, + ved); - tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame); - - /* Send the frame to all source pads */ - for (i = 0; i < vsen->sd.entity.num_pads; i++) - vimc_propagate_frame(&vsen->sd.entity.pads[i], - vsen->frame); - - /* 60 frames per second */ - schedule_timeout(HZ/60); - } - - return 0; + tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame); + return vsen->frame; } static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_sen_device *vsen = container_of(sd, struct vimc_sen_device, sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -258,26 +238,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable) /* configure the test pattern generator */ vimc_sen_tpg_s_format(vsen); - /* Initialize the image generator thread */ - vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen, - "%s-sen", vsen->sd.v4l2_dev->name); - if (IS_ERR(vsen->kthread_sen)) { - dev_err(vsen->dev, "%s: kernel_thread() failed\n", - vsen->sd.name); - vfree(vsen->frame); - vsen->frame = NULL; - return PTR_ERR(vsen->kthread_sen); - } } else { - if (!vsen->kthread_sen) - return 0; - - /* Stop image generator */ - ret = kthread_stop(vsen->kthread_sen); - if (ret) - return ret; - vsen->kthread_sen = NULL; vfree(vsen->frame); vsen->frame = NULL; return 0; @@ -413,6 +375,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master, if (ret) goto err_free_hdl; + vsen->ved.process_frame = vimc_sen_process_frame; dev_set_drvdata(comp, &vsen->ved); vsen->dev = comp; diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c new file mode 100644 index 000000000000..fcc897fb247b --- /dev/null +++ b/drivers/media/platform/vimc/vimc-streamer.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * 
vimc-streamer.c Virtual Media Controller Driver + * + * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com> + * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/freezer.h> +#include <linux/kthread.h> + +#include "vimc-streamer.h" + +/** + * vimc_get_source_entity - get the entity connected with the first sink pad + * + * @ent: reference media_entity + * + * Helper function that returns the media entity containing the source pad + * linked with the first sink pad from the given media entity pad list. + */ +static struct media_entity *vimc_get_source_entity(struct media_entity *ent) +{ + struct media_pad *pad; + int i; + + for (i = 0; i < ent->num_pads; i++) { + if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE) + continue; + pad = media_entity_remote_pad(&ent->pads[i]); + return pad ? pad->entity : NULL; + } + return NULL; +} + +/* + * vimc_streamer_pipeline_terminate - Disable stream in all ved in stream + * + * @stream: the pointer to the stream structure with the pipeline to be + * disabled. + * + * Calls s_stream to disable the stream in each entity of the pipeline + * + */ +static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream) +{ + struct media_entity *entity; + struct v4l2_subdev *sd; + + while (stream->pipe_size) { + stream->pipe_size--; + entity = stream->ved_pipeline[stream->pipe_size]->ent; + entity = vimc_get_source_entity(entity); + stream->ved_pipeline[stream->pipe_size] = NULL; + + if (!is_media_entity_v4l2_subdev(entity)) + continue; + + sd = media_entity_to_v4l2_subdev(entity); + v4l2_subdev_call(sd, video, s_stream, 0); + } +} + +/* + * vimc_streamer_pipeline_init - initializes the stream structure + * + * @stream: the pointer to the stream structure to be initialized + * @ved: the pointer to the vimc entity initializing the stream + * + * Initializes the stream structure. Walks through the entity graph to + * construct the pipeline used later on the streamer thread. + * Calls s_stream to enable stream in all entities of the pipeline. 
+ */ +static int vimc_streamer_pipeline_init(struct vimc_stream *stream, + struct vimc_ent_device *ved) +{ + struct media_entity *entity; + struct video_device *vdev; + struct v4l2_subdev *sd; + int ret = 0; + + stream->pipe_size = 0; + while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) { + if (!ved) { + vimc_streamer_pipeline_terminate(stream); + return -EINVAL; + } + stream->ved_pipeline[stream->pipe_size++] = ved; + + entity = vimc_get_source_entity(ved->ent); + /* Check if the end of the pipeline was reached*/ + if (!entity) + return 0; + + if (is_media_entity_v4l2_subdev(entity)) { + sd = media_entity_to_v4l2_subdev(entity); + ret = v4l2_subdev_call(sd, video, s_stream, 1); + if (ret && ret != -ENOIOCTLCMD) { + vimc_streamer_pipeline_terminate(stream); + return ret; + } + ved = v4l2_get_subdevdata(sd); + } else { + vdev = container_of(entity, + struct video_device, + entity); + ved = video_get_drvdata(vdev); + } + } + + vimc_streamer_pipeline_terminate(stream); + return -EINVAL; +} + +static int vimc_streamer_thread(void *data) +{ + struct vimc_stream *stream = data; + int i; + + set_freezable(); + set_current_state(TASK_UNINTERRUPTIBLE); + + for (;;) { + try_to_freeze(); + if (kthread_should_stop()) + break; + + for (i = stream->pipe_size - 1; i >= 0; i--) { + stream->frame = stream->ved_pipeline[i]->process_frame( + stream->ved_pipeline[i], + stream->frame); + if (!stream->frame) + break; + if (IS_ERR(stream->frame)) + break; + } + //wait for 60hz + schedule_timeout(HZ / 60); + } + + return 0; +} + +int vimc_streamer_s_stream(struct vimc_stream *stream, + struct vimc_ent_device *ved, + int enable) +{ + int ret; + + if (!stream || !ved) + return -EINVAL; + + if (enable) { + if (stream->kthread) + return 0; + + ret = vimc_streamer_pipeline_init(stream, ved); + if (ret) + return ret; + + stream->kthread = kthread_run(vimc_streamer_thread, stream, + "vimc-streamer thread"); + + if (IS_ERR(stream->kthread)) + return PTR_ERR(stream->kthread); + + } else { + if (!stream->kthread) + return 0; + + ret = kthread_stop(stream->kthread); + if (ret) + return ret; + + stream->kthread = NULL; + + vimc_streamer_pipeline_terminate(stream); + } + + return 0; +} +EXPORT_SYMBOL_GPL(vimc_streamer_s_stream); + +MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer"); +MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h new file mode 100644 index 000000000000..752af2e2d5a2 --- /dev/null +++ b/drivers/media/platform/vimc/vimc-streamer.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * vimc-streamer.h Virtual Media Controller Driver + * + * Copyright (C) 2018 Lucas A. M. 
Magalhães <lucmaga@gmail.com> + * + */ + +#ifndef _VIMC_STREAMER_H_ +#define _VIMC_STREAMER_H_ + +#include <media/media-device.h> + +#include "vimc-common.h" + +#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16 + +struct vimc_stream { + struct media_pipeline pipe; + struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE]; + unsigned int pipe_size; + u8 *frame; + struct task_struct *kthread; +}; + +/** + * vimc_streamer_s_streamer - start/stop the stream + * + * @stream: the pointer to the stream to start or stop + * @ved: The last entity of the streamer pipeline + * @enable: any non-zero number start the stream, zero stop + * + */ +int vimc_streamer_s_stream(struct vimc_stream *stream, + struct vimc_ent_device *ved, + int enable); + +#endif //_VIMC_STREAMER_H_ diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index c931f007e5b0..342e0e6c103b 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -371,7 +371,7 @@ static int vidioc_s_parm(struct file *file, void *fh, if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_s_parm(file, fh, parm); - return vivid_vid_out_g_parm(file, fh, parm); + return -ENOTTY; } static int vidioc_log_status(struct file *file, void *fh) @@ -1094,7 +1094,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q = &dev->vb_vid_cap_q; q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_vid_cap_qops; @@ -1115,7 +1117,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q = &dev->vb_vid_out_q; q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_vid_out_qops; @@ -1136,7 +1140,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q = &dev->vb_vbi_cap_q; q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE : V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_vbi_cap_qops; @@ -1157,7 +1163,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q = &dev->vb_vbi_out_q; q->type = dev->has_raw_vbi_out ? 
V4L2_BUF_TYPE_VBI_OUTPUT : V4L2_BUF_TYPE_SLICED_VBI_OUTPUT; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_vbi_out_qops; @@ -1177,7 +1185,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) /* initialize sdr_cap queue */ q = &dev->vb_sdr_cap_q; q->type = V4L2_BUF_TYPE_SDR_CAPTURE; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_sdr_cap_qops; @@ -1468,9 +1478,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) return 0; unreg_dev: -#ifdef CONFIG_MEDIA_CONTROLLER - media_device_unregister(&dev->mdev); -#endif video_unregister_device(&dev->radio_tx_dev); video_unregister_device(&dev->radio_rx_dev); video_unregister_device(&dev->sdr_cap_dev); @@ -1543,6 +1550,7 @@ static int vivid_remove(struct platform_device *pdev) #ifdef CONFIG_MEDIA_CONTROLLER media_device_unregister(&dev->mdev); + media_device_cleanup(&dev->mdev); #endif if (dev->has_vid_cap) { diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index c059fc12668a..52eeda624d7e 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -124,7 +124,8 @@ static int vid_cap_queue_setup(struct vb2_queue *vq, } } else { for (p = 0; p < buffers; p++) - sizes[p] = tpg_g_line_width(&dev->tpg, p) * h + + sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) / + dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; } @@ -161,7 +162,9 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb) return -EINVAL; } for (p = 0; p < buffers; p++) { - size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height + + size = (tpg_g_line_width(&dev->tpg, p) * + dev->fmt_cap_rect.height) / + dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; if (vb2_plane_size(vb, p) < size) { @@ -545,7 +548,8 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv, for (p = 0; p < mp->num_planes; p++) { mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p); mp->plane_fmt[p].sizeimage = - tpg_g_line_width(&dev->tpg, p) * mp->height + + (tpg_g_line_width(&dev->tpg, p) * mp->height) / + dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; } return 0; diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c index 661f4015fba1..74b83bcc6119 100644 --- a/drivers/media/platform/vivid/vivid-vid-common.c +++ b/drivers/media/platform/vivid/vivid-vid-common.c @@ -169,6 +169,36 @@ struct vivid_fmt vivid_formats[] = { .alpha_mask = 0x000000ff, }, { + .fourcc = V4L2_PIX_FMT_AYUV32, + .vdownsampling = { 1 }, + .bit_depth = { 32 }, + .planes = 1, + .buffers = 1, + .alpha_mask = 0x000000ff, + }, + { + .fourcc = V4L2_PIX_FMT_XYUV32, + .vdownsampling = { 1 }, + .bit_depth = { 32 }, + .planes = 1, + .buffers = 1, + }, + { + .fourcc = V4L2_PIX_FMT_VUYA32, + .vdownsampling = { 1 }, + .bit_depth = { 32 }, + .planes = 1, + .buffers = 1, + .alpha_mask = 0xff000000, + }, + { + .fourcc = V4L2_PIX_FMT_VUYX32, + .vdownsampling = { 1 }, + .bit_depth = { 32 }, + .planes = 1, + .buffers = 1, + }, + { .fourcc = V4L2_PIX_FMT_GREY, .vdownsampling = { 1 }, .bit_depth = { 8 
}, diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index ea250aee2b2e..e61b91b414f9 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -28,11 +28,12 @@ static int vid_out_queue_setup(struct vb2_queue *vq, const struct vivid_fmt *vfmt = dev->fmt_out; unsigned planes = vfmt->buffers; unsigned h = dev->fmt_out_rect.height; - unsigned size = dev->bytesperline_out[0] * h; + unsigned int size = dev->bytesperline_out[0] * h + vfmt->data_offset[0]; unsigned p; for (p = vfmt->buffers; p < vfmt->planes; p++) - size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p]; + size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p] + + vfmt->data_offset[p]; if (dev->field_out == V4L2_FIELD_ALTERNATE) { /* @@ -62,12 +63,14 @@ static int vid_out_queue_setup(struct vb2_queue *vq, if (sizes[0] < size) return -EINVAL; for (p = 1; p < planes; p++) { - if (sizes[p] < dev->bytesperline_out[p] * h) + if (sizes[p] < dev->bytesperline_out[p] * h + + vfmt->data_offset[p]) return -EINVAL; } } else { for (p = 0; p < planes; p++) - sizes[p] = p ? dev->bytesperline_out[p] * h : size; + sizes[p] = p ? dev->bytesperline_out[p] * h + + vfmt->data_offset[p] : size; } if (vq->num_buffers + *nbuffers < 2) @@ -81,21 +84,38 @@ static int vid_out_queue_setup(struct vb2_queue *vq, return 0; } -static int vid_out_buf_prepare(struct vb2_buffer *vb) +static int vid_out_buf_out_validate(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); - unsigned long size; - unsigned planes; + + dprintk(dev, 1, "%s\n", __func__); + + if (dev->field_out != V4L2_FIELD_ALTERNATE) + vbuf->field = dev->field_out; + else if (vbuf->field != V4L2_FIELD_TOP && + vbuf->field != V4L2_FIELD_BOTTOM) + return -EINVAL; + return 0; +} + +static int vid_out_buf_prepare(struct vb2_buffer *vb) +{ + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); + const struct vivid_fmt *vfmt = dev->fmt_out; + unsigned int planes = vfmt->buffers; + unsigned int h = dev->fmt_out_rect.height; + unsigned int size = dev->bytesperline_out[0] * h; unsigned p; + for (p = vfmt->buffers; p < vfmt->planes; p++) + size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p]; + dprintk(dev, 1, "%s\n", __func__); if (WARN_ON(NULL == dev->fmt_out)) return -EINVAL; - planes = dev->fmt_out->planes; - if (dev->buf_prepare_error) { /* * Error injection: test what happens if buf_prepare() returns @@ -105,18 +125,13 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb) return -EINVAL; } - if (dev->field_out != V4L2_FIELD_ALTERNATE) - vbuf->field = dev->field_out; - else if (vbuf->field != V4L2_FIELD_TOP && - vbuf->field != V4L2_FIELD_BOTTOM) - return -EINVAL; - for (p = 0; p < planes; p++) { - size = dev->bytesperline_out[p] * dev->fmt_out_rect.height + - vb->planes[p].data_offset; + if (p) + size = dev->bytesperline_out[p] * h; + size += vb->planes[p].data_offset; if (vb2_get_plane_payload(vb, p) < size) { - dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n", + dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %u)\n", __func__, p, vb2_get_plane_payload(vb, p), size); return -EINVAL; } @@ -188,6 +203,7 @@ static void vid_out_buf_request_complete(struct vb2_buffer *vb) const struct vb2_ops vivid_vid_out_qops = { .queue_setup = vid_out_queue_setup, + .buf_out_validate = vid_out_buf_out_validate, .buf_prepare = vid_out_buf_prepare, .buf_queue = 
vid_out_buf_queue, .start_streaming = vid_out_start_streaming, @@ -321,7 +337,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv, for (p = 0; p < mp->num_planes; p++) { mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p]; mp->plane_fmt[p].sizeimage = - mp->plane_fmt[p].bytesperline * mp->height; + mp->plane_fmt[p].bytesperline * mp->height + + fmt->data_offset[p]; } for (p = fmt->buffers; p < fmt->planes; p++) { unsigned stride = dev->bytesperline_out[p]; @@ -399,7 +416,7 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv, pfmt[p].bytesperline = bytesperline; pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) / - fmt->vdownsampling[p]; + fmt->vdownsampling[p] + fmt->data_offset[p]; memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); } diff --git a/drivers/media/platform/vsp1/vsp1_brx.c b/drivers/media/platform/vsp1/vsp1_brx.c index 5e50178b057d..58ad248cd7f7 100644 --- a/drivers/media/platform/vsp1/vsp1_brx.c +++ b/drivers/media/platform/vsp1/vsp1_brx.c @@ -296,7 +296,7 @@ static void brx_configure_stream(struct vsp1_entity *entity, /* * The hardware is extremely flexible but we have no userspace API to * expose all the parameters, nor is it clear whether we would have use - * cases for all the supported modes. Let's just harcode the parameters + * cases for all the supported modes. Let's just hardcode the parameters * to sane default values for now. */ @@ -373,7 +373,7 @@ static void brx_configure_stream(struct vsp1_entity *entity, vsp1_brx_write(brx, dlb, VI6_BRU_CTRL(i), ctrl); /* - * Harcode the blending formula to + * Hardcode the blending formula to * * DSTc = DSTc * (1 - SRCa) + SRCc * SRCa * DSTa = DSTa * (1 - SRCa) + SRCa diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index 8d86f618ec77..84895385d2e5 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -333,19 +333,19 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1, * on the BRx sink pad 0 and propagated inside the entity, not on the * source pad. */ - format.pad = pipe->brx->source_pad; + format.pad = brx->source_pad; format.format.width = drm_pipe->width; format.format.height = drm_pipe->height; format.format.field = V4L2_FIELD_NONE; - ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_fmt, NULL, + ret = v4l2_subdev_call(&brx->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n", __func__, format.format.width, format.format.height, - format.format.code, BRX_NAME(pipe->brx), pipe->brx->source_pad); + format.format.code, BRX_NAME(brx), brx->source_pad); if (format.format.width != drm_pipe->width || format.format.height != drm_pipe->height) { diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 771dfe1f7c20..7ceaf3222145 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -223,7 +223,7 @@ static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe, * If the modulus is less than half of the partition size, * the penultimate partition is reduced to half, which is added * to the final partition: |1234|1234|1234|12|341| - * to prevents this: |1234|1234|1234|1234|1|. + * to prevent this: |1234|1234|1234|1234|1|. 
*/ if (modulus) { /* diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c index 18f98838111b..08a825c3a3f6 100644 --- a/drivers/media/platform/xilinx/xilinx-vip.c +++ b/drivers/media/platform/xilinx/xilinx-vip.c @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(xvip_set_format_size); * the register, otherwise the bitmask is cleared from the register * when the flag @set is false. * - * Fox eample, this function can be used to set a control with a boolean value + * Fox example, this function can be used to set a control with a boolean value * requested by users. If the caller knows whether to set or clear in the first * place, the caller should call xvip_clr() or xvip_set() directly instead of * using this function.
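The vivid hunks above repeatedly converge on one per-plane size rule: a plane's sizeimage is its line width multiplied by the frame height, divided by that plane's vertical downsampling factor, plus the per-plane data offset (the term the vid-out changes add). Below is a minimal user-space sketch of that arithmetic, not code taken from the driver; struct plane_fmt, plane_sizeimage() and the NV12M-like example values are illustrative assumptions only.

#include <stdio.h>

struct plane_fmt {
	unsigned int bytesperline;	/* bytes per line of this plane */
	unsigned int vdownsampling;	/* vertical subsampling factor, 1 = full height */
	unsigned int data_offset;	/* bytes reserved in front of the payload */
};

/* sizeimage = bytesperline * height / vdownsampling + data_offset */
static unsigned int plane_sizeimage(const struct plane_fmt *p, unsigned int height)
{
	return p->bytesperline * height / p->vdownsampling + p->data_offset;
}

int main(void)
{
	/* NV12M-like layout: full-height luma plane, half-height interleaved chroma plane */
	const struct plane_fmt planes[] = {
		{ .bytesperline = 640, .vdownsampling = 1, .data_offset = 0 },
		{ .bytesperline = 640, .vdownsampling = 2, .data_offset = 0 },
	};
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("plane %u: %u bytes\n", i, plane_sizeimage(&planes[i], 480));
	/* plane 0: 307200 bytes, plane 1: 153600 bytes */
	return 0;
}

Dividing by vdownsampling before adding data_offset mirrors the corrected expressions in vid_cap_queue_setup(), vid_cap_buf_prepare() and vivid_g_fmt_vid_cap() above; omitting the division, as the old code did, overstates the size of the chroma planes of vertically subsampled formats.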