Diffstat (limited to 'drivers/gpu/drm/msm')
132 files changed, 3507 insertions, 2134 deletions
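Several hunks in the Adreno changes below are a mechanical migration to the renamed kernel timer helpers: del_timer() becomes timer_delete() and from_timer() becomes timer_container_of(). The following is a minimal sketch of the new pattern, not code from the patch; the demo_* names are hypothetical.

#include <linux/timer.h>

/* Hypothetical device state embedding a watchdog timer. */
struct demo_gpu {
	struct timer_list hangcheck_timer;
	bool hung;
};

/* Timer callback: recover the owning structure from the timer_list pointer
 * with timer_container_of() (the renamed from_timer()).
 */
static void demo_hangcheck(struct timer_list *t)
{
	struct demo_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);

	gpu->hung = true;
}

static void demo_init(struct demo_gpu *gpu)
{
	timer_setup(&gpu->hangcheck_timer, demo_hangcheck, 0);
}

static void demo_fault(struct demo_gpu *gpu)
{
	/* timer_delete() is the current name for del_timer(): deactivate the
	 * timer if it is pending, without waiting for a running callback.
	 */
	timer_delete(&gpu->hangcheck_timer);
}

The conversion is behavior-neutral; only the names of the helpers change.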
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 7ec833b6d829..7f127e2ae442 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -104,6 +104,7 @@ config DRM_MSM_DPU config DRM_MSM_DP bool "Enable DisplayPort support in MSM DRM driver" depends on DRM_MSM + select DRM_DISPLAY_HDMI_AUDIO_HELPER select RATIONAL default y help @@ -170,6 +171,8 @@ config DRM_MSM_HDMI bool "Enable HDMI support in MSM DRM driver" depends on DRM_MSM default y + select DRM_DISPLAY_HDMI_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER help Compile in support for the HDMI output MSM DRM driver. It can be a primary or a secondary display on device. Note that this is used diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 5df20cbeafb8..7a2ada6e2d74 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -48,7 +48,6 @@ msm-display-$(CONFIG_DRM_MSM_MDP4) += \ disp/mdp4/mdp4_dsi_encoder.o \ disp/mdp4/mdp4_dtv_encoder.o \ disp/mdp4/mdp4_lcdc_encoder.o \ - disp/mdp4/mdp4_lvds_connector.o \ disp/mdp4/mdp4_lvds_pll.o \ disp/mdp4/mdp4_irq.o \ disp/mdp4/mdp4_kms.o \ diff --git a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c index 9ddb7b31fd98..5ddd015f930d 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c @@ -45,8 +45,3 @@ static const struct adreno_info a2xx_gpus[] = { } }; DECLARE_ADRENO_GPULIST(a2xx); - -MODULE_FIRMWARE("qcom/leia_pfp_470.fw"); -MODULE_FIRMWARE("qcom/leia_pm4_470.fw"); -MODULE_FIRMWARE("qcom/yamato_pfp.fw"); -MODULE_FIRMWARE("qcom/yamato_pm4.fw"); diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c index 39641551eeb6..4280f71e472a 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c @@ -71,10 +71,6 @@ static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) return 0; } -static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu) -{ -} - static void a2xx_gpummu_destroy(struct msm_mmu *mmu) { struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu); @@ -90,7 +86,6 @@ static const struct msm_mmu_funcs funcs = { .map = a2xx_gpummu_map, .unmap = a2xx_gpummu_unmap, .destroy = a2xx_gpummu_destroy, - .resume_translation = a2xx_gpummu_resume_translation, }; struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu) diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c index 2eb6c3e93748..1498e6532f62 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c @@ -85,8 +85,3 @@ static const struct adreno_info a3xx_gpus[] = { } }; DECLARE_ADRENO_GPULIST(a3xx); - -MODULE_FIRMWARE("qcom/a300_pm4.fw"); -MODULE_FIRMWARE("qcom/a300_pfp.fw"); -MODULE_FIRMWARE("qcom/a330_pm4.fw"); -MODULE_FIRMWARE("qcom/a330_pfp.fw"); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c index 93519f807f87..09f9f228b75e 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c @@ -45,6 +45,3 @@ static const struct adreno_info a4xx_gpus[] = { } }; DECLARE_ADRENO_GPULIST(a4xx); - -MODULE_FIRMWARE("qcom/a420_pm4.fw"); -MODULE_FIRMWARE("qcom/a420_pfp.fw"); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c index 633f31539162..b48a636d8237 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c +++ 
b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c @@ -150,12 +150,3 @@ static const struct adreno_info a5xx_gpus[] = { } }; DECLARE_ADRENO_GPULIST(a5xx); - -MODULE_FIRMWARE("qcom/a530_pm4.fw"); -MODULE_FIRMWARE("qcom/a530_pfp.fw"); -MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2"); -MODULE_FIRMWARE("qcom/a530_zap.mdt"); -MODULE_FIRMWARE("qcom/a530_zap.b00"); -MODULE_FIRMWARE("qcom/a530_zap.b01"); -MODULE_FIRMWARE("qcom/a530_zap.b02"); -MODULE_FIRMWARE("qcom/a540_gpmu.fw2"); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 71dca78cd7a5..60aef0796236 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -131,6 +131,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; + adreno_check_and_reenable_stall(adreno_gpu); + if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { ring->cur_ctx_seqno = 0; a5xx_submit_in_rb(gpu, submit); @@ -1253,7 +1255,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu) gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ)); /* Turn off the hangcheck timer to keep it from bothering us */ - del_timer(&gpu->hangcheck_timer); + timer_delete(&gpu->hangcheck_timer); kthread_queue_work(gpu->worker, &gpu->recover_work); } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 0469fea55010..b5f9d40687d5 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -79,7 +79,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) static void a5xx_preempt_timer(struct timer_list *t) { - struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer); + struct a5xx_gpu *a5xx_gpu = timer_container_of(a5xx_gpu, t, + preempt_timer); struct msm_gpu *gpu = &a5xx_gpu->base.base; struct drm_device *dev = gpu->dev; @@ -182,7 +183,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu) return; /* Delete the preemption watchdog timer */ - del_timer(&a5xx_gpu->preempt_timer); + timer_delete(&a5xx_gpu->preempt_timer); /* * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index edffb7737a97..70f7ad806c34 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -681,6 +681,7 @@ static const struct adreno_info a6xx_gpus[] = { [ADRENO_FW_SQE] = "a630_sqe.fw", }, .gmem = (SZ_128K + SZ_4K), + .quirks = ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a6xx_gpu_init, .zapfw = "a610_zap.mdt", @@ -713,6 +714,7 @@ static const struct adreno_info a6xx_gpus[] = { [ADRENO_FW_GMU] = "a630_gmu.bin", }, .gmem = SZ_512K, + .quirks = ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a6xx_gpu_init, .zapfw = "a615_zap.mdt", @@ -743,7 +745,8 @@ static const struct adreno_info a6xx_gpus[] = { }, .gmem = SZ_512K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_4GB_VA, .init = a6xx_gpu_init, .zapfw = "a615_zap.mbn", .a6xx = &(const struct a6xx_info) { @@ -769,7 +772,8 @@ static const struct adreno_info a6xx_gpus[] = { }, .gmem = SZ_512K, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_4GB_VA, .init = a6xx_gpu_init, .a6xx = &(const 
struct a6xx_info) { .protect = &a630_protect, @@ -791,6 +795,7 @@ static const struct adreno_info a6xx_gpus[] = { [ADRENO_FW_GMU] = "a619_gmu.bin", }, .gmem = SZ_512K, + .quirks = ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a6xx_gpu_init, .zapfw = "a615_zap.mdt", @@ -815,6 +820,7 @@ static const struct adreno_info a6xx_gpus[] = { [ADRENO_FW_GMU] = "a619_gmu.bin", }, .gmem = SZ_512K, + .quirks = ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a6xx_gpu_init, .zapfw = "a615_zap.mdt", @@ -838,8 +844,9 @@ static const struct adreno_info a6xx_gpus[] = { [ADRENO_FW_GMU] = "a619_gmu.bin", }, .gmem = SZ_512K, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT, .init = a6xx_gpu_init, .zapfw = "a615_zap.mdt", .a6xx = &(const struct a6xx_info) { @@ -874,12 +881,39 @@ static const struct adreno_info a6xx_gpus[] = { .gmu_cgc_mode = 0x00020200, .prim_fifo_threshold = 0x00010000, }, - .address_space_size = SZ_16G, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, { 137, 1 }, ), }, { + .chip_ids = ADRENO_CHIP_IDS(0x06020300), + .family = ADRENO_6XX_GEN3, + .fw = { + [ADRENO_FW_SQE] = "a650_sqe.fw", + [ADRENO_FW_GMU] = "a623_gmu.bin", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_HAS_HW_APRIV, + .init = a6xx_gpu_init, + .a6xx = &(const struct a6xx_info) { + .hwcg = a690_hwcg, + .protect = &a650_protect, + .gmu_cgc_mode = 0x00020200, + .prim_fifo_threshold = 0x00010000, + .bcms = (const struct a6xx_bcm[]) { + { .name = "SH0", .buswidth = 16 }, + { .name = "MC0", .buswidth = 4 }, + { + .name = "ACV", + .fixed = true, + .perfmode = BIT(3), + }, + { /* sentinel */ }, + }, + }, + }, { .chip_ids = ADRENO_CHIP_IDS( 0x06030001, 0x06030002 @@ -891,8 +925,9 @@ static const struct adreno_info a6xx_gpus[] = { [ADRENO_FW_GMU] = "a630_gmu.bin", }, .gmem = SZ_1M, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT, .init = a6xx_gpu_init, .zapfw = "a630_zap.mdt", .a6xx = &(const struct a6xx_info) { @@ -910,8 +945,9 @@ static const struct adreno_info a6xx_gpus[] = { [ADRENO_FW_GMU] = "a640_gmu.bin", }, .gmem = SZ_1M, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT, .init = a6xx_gpu_init, .zapfw = "a640_zap.mdt", .a6xx = &(const struct a6xx_info) { @@ -944,7 +980,6 @@ static const struct adreno_info a6xx_gpus[] = { .gmu_cgc_mode = 0x00020202, .prim_fifo_threshold = 0x00300200, }, - .address_space_size = SZ_16G, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, { 1, 1 }, @@ -971,7 +1006,6 @@ static const struct adreno_info a6xx_gpus[] = { .gmu_cgc_mode = 0x00020000, .prim_fifo_threshold = 0x00300200, }, - .address_space_size = SZ_16G, }, { .chip_ids = ADRENO_CHIP_IDS(0x06060300), .family = ADRENO_6XX_GEN4, @@ -990,7 +1024,6 @@ static const struct adreno_info a6xx_gpus[] = { .gmu_cgc_mode = 0x00020200, .prim_fifo_threshold = 0x00300200, }, - .address_space_size = SZ_16G, }, { .chip_ids = ADRENO_CHIP_IDS(0x06030500), .family = ADRENO_6XX_GEN4, @@ -1010,7 +1043,6 @@ static const struct adreno_info a6xx_gpus[] = { .gmu_cgc_mode = 0x00020202, .prim_fifo_threshold = 0x00200200, }, - .address_space_size = SZ_16G, .speedbins = ADRENO_SPEEDBINS( { 0, 0 }, { 117, 0 }, @@ -1027,8 +1059,9 @@ static 
const struct adreno_info a6xx_gpus[] = { [ADRENO_FW_GMU] = "a640_gmu.bin", }, .gmem = SZ_2M, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_4GB_VA, .inactive_period = DRM_MSM_INACTIVE_PERIOD, - .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT, .init = a6xx_gpu_init, .zapfw = "a640_zap.mdt", .a6xx = &(const struct a6xx_info) { @@ -1056,22 +1089,10 @@ static const struct adreno_info a6xx_gpus[] = { .gmu_cgc_mode = 0x00020200, .prim_fifo_threshold = 0x00800200, }, - .address_space_size = SZ_16G, } }; DECLARE_ADRENO_GPULIST(a6xx); -MODULE_FIRMWARE("qcom/a615_zap.mbn"); -MODULE_FIRMWARE("qcom/a619_gmu.bin"); -MODULE_FIRMWARE("qcom/a630_sqe.fw"); -MODULE_FIRMWARE("qcom/a630_gmu.bin"); -MODULE_FIRMWARE("qcom/a630_zap.mbn"); -MODULE_FIRMWARE("qcom/a640_gmu.bin"); -MODULE_FIRMWARE("qcom/a650_gmu.bin"); -MODULE_FIRMWARE("qcom/a650_sqe.fw"); -MODULE_FIRMWARE("qcom/a660_gmu.bin"); -MODULE_FIRMWARE("qcom/a660_sqe.fw"); - static const struct adreno_reglist a702_hwcg[] = { { REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222 }, { REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220 }, @@ -1366,7 +1387,6 @@ static const struct adreno_info a7xx_gpus[] = { .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_cgc_mode = 0x00020000, }, - .address_space_size = SZ_16G, .preempt_record_size = 2860 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */ @@ -1400,7 +1420,6 @@ static const struct adreno_info a7xx_gpus[] = { { /* sentinel */ }, }, }, - .address_space_size = SZ_16G, .preempt_record_size = 4192 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */ @@ -1422,7 +1441,6 @@ static const struct adreno_info a7xx_gpus[] = { .gmu_chipid = 0x7050001, .gmu_cgc_mode = 0x00020202, }, - .address_space_size = SZ_256G, .preempt_record_size = 4192 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */ @@ -1455,7 +1473,6 @@ static const struct adreno_info a7xx_gpus[] = { { /* sentinel */ }, }, }, - .address_space_size = SZ_16G, .preempt_record_size = 3572 * SZ_1K, } }; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 65d38b25c070..38c0f8ef85c3 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -28,7 +28,7 @@ static void a6xx_gmu_fault(struct a6xx_gmu *gmu) gmu->hung = true; /* Turn off the hangcheck timer while we are resetting */ - del_timer(&gpu->hangcheck_timer); + timer_delete(&gpu->hangcheck_timer); /* Queue the GPU handler because we need to treat this as a recovery */ kthread_queue_work(gpu->worker, &gpu->recover_work); @@ -813,10 +813,10 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) } ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION); - DRM_INFO("Loaded GMU firmware v%u.%u.%u\n", - FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver), - FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver), - FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver)); + DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n", + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver), + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver), + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver)); return 0; } @@ -1064,14 +1064,6 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) gmu->hung = false; - /* Notify AOSS about the ACD state (unimplemented for now => disable it) */ - if (!IS_ERR(gmu->qmp)) { - ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", - 0 /* Hardcode ACD to be disabled for now */); - if (ret) - dev_err(gmu->dev, "failed to send GPU ACD state\n"); - } - /* Turn on the resources */ 
pm_runtime_get_sync(gmu->dev); @@ -1169,49 +1161,50 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; u32 val; + int ret; /* - * The GMU may still be in slumber unless the GPU started so check and - * skip putting it back into slumber if so + * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when + * oob_gpu handshake wasn't done after the last wake up. So do a dummy handshake here when + * required */ - val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + if (adreno_gpu->base.needs_hw_init) { + if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET)) + goto force_off; - if (val != 0xf) { - int ret = a6xx_gmu_wait_for_idle(gmu); + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + } - /* If the GMU isn't responding assume it is hung */ - if (ret) { - a6xx_gmu_force_off(gmu); - return; - } + ret = a6xx_gmu_wait_for_idle(gmu); - a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); + /* If the GMU isn't responding assume it is hung */ + if (ret) + goto force_off; - /* tell the GMU we want to slumber */ - ret = a6xx_gmu_notify_slumber(gmu); - if (ret) { - a6xx_gmu_force_off(gmu); - return; - } + a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); - ret = gmu_poll_timeout(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, - !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), - 100, 10000); + /* tell the GMU we want to slumber */ + ret = a6xx_gmu_notify_slumber(gmu); + if (ret) + goto force_off; - /* - * Let the user know we failed to slumber but don't worry too - * much because we are powering down anyway - */ + ret = gmu_poll_timeout(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, + !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), + 100, 10000); - if (ret) - DRM_DEV_ERROR(gmu->dev, - "Unable to slumber GMU: status = 0%x/0%x\n", - gmu_read(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), - gmu_read(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); - } + /* + * Let the user know we failed to slumber but don't worry too + * much because we are powering down anyway + */ + + if (ret) + DRM_DEV_ERROR(gmu->dev, + "Unable to slumber GMU: status = 0%x/0%x\n", + gmu_read(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), + gmu_read(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); /* Turn off HFI */ a6xx_hfi_stop(gmu); @@ -1221,6 +1214,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) /* Tell RPMh to power off the GPU */ a6xx_rpmh_stop(gmu); + + return; + +force_off: + a6xx_gmu_force_off(gmu); } @@ -1665,6 +1663,75 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) return a6xx_gmu_rpmh_votes_init(gmu); } +static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct a6xx_hfi_acd_table *cmd = &gmu->acd_table; + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + int ret, i, cmd_idx = 0; + extern bool disable_acd; + + /* Skip ACD probe if requested via module param */ + if (disable_acd) { + DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n"); + return 0; + } + + cmd->version = 1; + cmd->stride = 1; + cmd->enable_by_level = 0; + + /* Skip freq = 0 and parse acd-level for rest of the OPPs */ + for (i = 1; i < gmu->nr_gpu_freqs; i++) { + struct dev_pm_opp *opp; + struct device_node *np; + unsigned long freq; + u32 val; + + freq = gmu->gpu_freqs[i]; + opp = 
dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true); + np = dev_pm_opp_get_of_node(opp); + + ret = of_property_read_u32(np, "qcom,opp-acd-level", &val); + of_node_put(np); + dev_pm_opp_put(opp); + if (ret == -EINVAL) + continue; + else if (ret) { + DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq); + return ret; + } + + cmd->enable_by_level |= BIT(i); + cmd->data[cmd_idx++] = val; + } + + cmd->num_levels = cmd_idx; + + /* It is a problem if qmp node is unavailable when ACD is required */ + if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) { + DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n"); + return -EINVAL; + } + + /* Otherwise, nothing to do if qmp is unavailable */ + if (IS_ERR_OR_NULL(gmu->qmp)) + return 0; + + /* + * Notify AOSS about the ACD state. AOSS is supposed to assume that ACD is disabled on + * system reset. So it is harmless if we couldn't notify 'OFF' state + */ + ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level); + if (ret && cmd->enable_by_level) { + DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n"); + return ret; + } + + return 0; +} + static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) { int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); @@ -1983,10 +2050,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) goto detach_cxpd; } + /* Other errors are handled during GPU ACD probe */ gmu->qmp = qmp_get(gmu->dev); - if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) { - ret = PTR_ERR(gmu->qmp); - goto remove_device_link; + if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto detach_gxpd; } init_completion(&gmu->pd_gate); @@ -2002,6 +2070,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) /* Get the power levels for the GMU and GPU */ a6xx_gmu_pwrlevels_probe(gmu); + ret = a6xx_gmu_acd_probe(gmu); + if (ret) + goto detach_gxpd; + /* Set up the HFI queues */ a6xx_hfi_init(gmu); @@ -2012,7 +2084,13 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) return 0; -remove_device_link: +detach_gxpd: + if (!IS_ERR_OR_NULL(gmu->gxpd)) + dev_pm_domain_detach(gmu->gxpd, false); + + if (!IS_ERR_OR_NULL(gmu->qmp)) + qmp_put(gmu->qmp); + device_link_del(link); detach_cxpd: diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 0c888b326cfb..b2d4489b4024 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -93,6 +93,7 @@ struct a6xx_gmu { int nr_gpu_freqs; unsigned long gpu_freqs[GMU_MAX_GX_FREQS]; u32 gx_arc_votes[GMU_MAX_GX_FREQS]; + struct a6xx_hfi_acd_table acd_table; int nr_gpu_bws; unsigned long gpu_bw_table[GMU_MAX_GX_FREQS]; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 0ae29a7c8a4d..491fde0083a2 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -130,6 +130,20 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, submit->seqno - 1); + + OUT_PKT7(ring, CP_THREAD_CONTROL, 1); + OUT_RING(ring, CP_SET_THREAD_BOTH); + + /* Reset state used to synchronize BR and BV */ + OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1); + OUT_RING(ring, + CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS | + CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE | + CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER | + 
CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS); + + OUT_PKT7(ring, CP_THREAD_CONTROL, 1); + OUT_RING(ring, CP_SET_THREAD_BR); } if (!sysprof) { @@ -212,6 +226,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; + adreno_check_and_reenable_stall(adreno_gpu); + a6xx_set_pagetable(a6xx_gpu, ring, submit); get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), @@ -242,10 +258,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; fallthrough; case MSM_SUBMIT_CMD_BUF: - OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, submit->cmd[i].size); + OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size)); ibs++; break; } @@ -335,6 +351,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; + adreno_check_and_reenable_stall(adreno_gpu); + /* * Toggle concurrent binning for pagetable switch and set the thread to * BR since only it can execute the pagetable switch packets. @@ -377,10 +395,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; fallthrough; case MSM_SUBMIT_CMD_BUF: - OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, submit->cmd[i].size); + OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size)); ibs++; break; } @@ -616,6 +634,14 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) gpu->ubwc_config.uavflagprd_inv = 2; } + if (adreno_is_a623(gpu)) { + gpu->ubwc_config.highest_bank_bit = 16; + gpu->ubwc_config.amsbc = 1; + gpu->ubwc_config.rgb565_predicator = 1; + gpu->ubwc_config.uavflagprd_inv = 2; + gpu->ubwc_config.macrotile_mode = 1; + } + if (adreno_is_a640_family(gpu)) gpu->ubwc_config.amsbc = 1; @@ -647,7 +673,6 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) if (adreno_is_7c3(gpu)) { gpu->ubwc_config.highest_bank_bit = 14; gpu->ubwc_config.amsbc = 1; - gpu->ubwc_config.rgb565_predicator = 1; gpu->ubwc_config.uavflagprd_inv = 2; gpu->ubwc_config.macrotile_mode = 1; } @@ -1698,7 +1723,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu) gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); /* Turn off the hangcheck timer to keep it from bothering us */ - del_timer(&gpu->hangcheck_timer); + timer_delete(&gpu->hangcheck_timer); kthread_queue_work(gpu->worker, &gpu->recover_work); } @@ -1718,7 +1743,7 @@ static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu) */ if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING | A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) { - del_timer(&gpu->hangcheck_timer); + timer_delete(&gpu->hangcheck_timer); kthread_queue_work(gpu->worker, &gpu->recover_work); } @@ -2260,7 +2285,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu) return ERR_CAST(mmu); return msm_gem_address_space_create(mmu, - "gpu", 0x100000000ULL, + "gpu", ADRENO_VM_START, adreno_private_address_space_size(gpu)); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 0fcae53c0b14..341a72a67401 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -1214,12 +1214,12 @@ static void 
a6xx_get_gmu_registers(struct msm_gpu *gpu, struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); a6xx_state->gmu_registers = state_kcalloc(a6xx_state, - 3, sizeof(*a6xx_state->gmu_registers)); + 4, sizeof(*a6xx_state->gmu_registers)); if (!a6xx_state->gmu_registers) return; - a6xx_state->nr_gmu_registers = 3; + a6xx_state->nr_gmu_registers = 4; /* Get the CX GMU registers from AHB */ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0], @@ -1227,6 +1227,13 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1], &a6xx_state->gmu_registers[1], true); + if (adreno_is_a621(adreno_gpu) || adreno_is_a623(adreno_gpu)) + _a6xx_get_gmu_registers(gpu, a6xx_state, &a621_gpucc_reg, + &a6xx_state->gmu_registers[2], false); + else + _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gpucc_reg, + &a6xx_state->gmu_registers[2], false); + if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) return; @@ -1234,7 +1241,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2], - &a6xx_state->gmu_registers[2], false); + &a6xx_state->gmu_registers[3], false); } static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo( @@ -1507,6 +1514,8 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu, /* Restore the size in the hardware */ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size); + + a6xx_state->nr_indexed_regs = count; } static void a7xx_get_indexed_registers(struct msm_gpu *gpu, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index dd4c28a8d923..e545106c70be 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -363,6 +363,9 @@ static const u32 a6xx_gmu_cx_registers[] = { 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201, /* GMU AO */ 0x9300, 0x9316, 0x9400, 0x9400, +}; + +static const u32 a6xx_gmu_gpucc_registers[] = { /* GPU CC */ 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b, 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40, @@ -373,6 +376,17 @@ static const u32 a6xx_gmu_cx_registers[] = { 0xbc00, 0xbc16, 0xbc20, 0xbc27, }; +static const u32 a621_gmu_gpucc_registers[] = { + /* GPU CC */ + 0x9800, 0x980e, 0x9c00, 0x9c0e, 0xb000, 0xb004, 0xb400, 0xb404, + 0xb800, 0xb804, 0xbc00, 0xbc05, 0xbc14, 0xbc1d, 0xbc2a, 0xbc30, + 0xbc32, 0xbc32, 0xbc41, 0xbc55, 0xbc66, 0xbc68, 0xbc78, 0xbc7a, + 0xbc89, 0xbc8a, 0xbc9c, 0xbc9e, 0xbca0, 0xbca3, 0xbcb3, 0xbcb5, + 0xbcc5, 0xbcc7, 0xbcd6, 0xbcd8, 0xbce8, 0xbce9, 0xbcf9, 0xbcfc, + 0xbd0b, 0xbd0c, 0xbd1c, 0xbd1e, 0xbd40, 0xbd70, 0xbe00, 0xbe16, + 0xbe20, 0xbe2d, +}; + static const u32 a6xx_gmu_cx_rscc_registers[] = { /* GPU RSCC */ 0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347, @@ -386,6 +400,9 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = { REGS(a6xx_gmu_gx_registers, 0, 0), }; +static const struct a6xx_registers a6xx_gpucc_reg = REGS(a6xx_gmu_gpucc_registers, 0, 0); +static const struct a6xx_registers a621_gpucc_reg = REGS(a621_gmu_gpucc_registers, 0, 0); + static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu); static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index 0989aee3dd2c..8e69b1e84657 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -100,16 +100,14 @@ static int 
a6xx_hfi_queue_write(struct a6xx_gmu *gmu, return 0; } -static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, - u32 *payload, u32 payload_size) +static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seqnum) { - struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; - u32 val; int ret; + u32 val; /* Wait for a response */ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, - val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000); + val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000); if (ret) { DRM_DEV_ERROR(gmu->dev, @@ -122,6 +120,19 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ); + return 0; +} + +static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, + u32 *payload, u32 payload_size) +{ + struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; + int ret; + + ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum); + if (ret) + return ret; + for (;;) { struct a6xx_hfi_msg_response resp; @@ -129,12 +140,18 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp, sizeof(resp) >> 2); - /* If the queue is empty our response never made it */ + /* If the queue is empty, there may have been previous missed + * responses that preceded the response to our packet. Wait + * further before we give up. + */ if (!ret) { - DRM_DEV_ERROR(gmu->dev, - "The HFI response queue is unexpectedly empty\n"); - - return -ENOENT; + ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum); + if (ret) { + DRM_DEV_ERROR(gmu->dev, + "The HFI response queue is unexpectedly empty\n"); + return ret; + } + continue; } if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) { @@ -748,6 +765,38 @@ send: NULL, 0); } +#define HFI_FEATURE_ACD 12 + +static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table; + struct a6xx_hfi_msg_feature_ctrl msg = { + .feature = HFI_FEATURE_ACD, + .enable = 1, + .data = 0, + }; + int ret; + + if (!acd_table->enable_by_level) + return 0; + + /* Enable ACD feature at GMU */ + ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0); + if (ret) { + DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret); + return ret; + } + + /* Send ACD table to GMU */ + ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0); + if (ret) { + DRM_DEV_ERROR(gmu->dev, "Unable to ACD table (%d)\n", ret); + return ret; + } + + return 0; +} + static int a6xx_hfi_send_test(struct a6xx_gmu *gmu) { struct a6xx_hfi_msg_test msg = { 0 }; @@ -845,6 +894,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) if (ret) return ret; + ret = a6xx_hfi_enable_acd(gmu); + if (ret) + return ret; + ret = a6xx_hfi_send_core_fw_start(gmu); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h index 52ba4a07d7b9..653ef720e2da 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h @@ -151,12 +151,33 @@ struct a6xx_hfi_msg_test { u32 header; }; +#define HFI_H2F_MSG_ACD 7 +#define MAX_ACD_STRIDE 2 + +struct a6xx_hfi_acd_table { + u32 header; + u32 version; + u32 enable_by_level; + u32 stride; + u32 num_levels; + u32 data[16 * MAX_ACD_STRIDE]; +}; + #define HFI_H2F_MSG_START 10 struct a6xx_hfi_msg_start { u32 header; }; +#define HFI_H2F_FEATURE_CTRL 11 + 
+struct a6xx_hfi_msg_feature_ctrl { + u32 header; + u32 feature; + u32 enable; + u32 data; +}; + #define HFI_H2F_MSG_CORE_FW_START 14 struct a6xx_hfi_msg_core_fw_start { diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c index 2fd4e39f618f..3b17fd2dba89 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c @@ -87,7 +87,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) static void a6xx_preempt_timer(struct timer_list *t) { - struct a6xx_gpu *a6xx_gpu = from_timer(a6xx_gpu, t, preempt_timer); + struct a6xx_gpu *a6xx_gpu = timer_container_of(a6xx_gpu, t, + preempt_timer); struct msm_gpu *gpu = &a6xx_gpu->base.base; struct drm_device *dev = gpu->dev; @@ -146,7 +147,7 @@ void a6xx_preempt_irq(struct msm_gpu *gpu) return; /* Delete the preemption watchdog timer */ - del_timer(&a6xx_gpu->preempt_timer); + timer_delete(&a6xx_gpu->preempt_timer); /* * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 236b25c094cd..16e7ac444efd 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -24,6 +24,10 @@ int enable_preemption = -1; MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))"); module_param(enable_preemption, int, 0600); +bool disable_acd; +MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD"); +module_param_unsafe(disable_acd, bool, 0400); + extern const struct adreno_gpulist a2xx_gpulist; extern const struct adreno_gpulist a3xx_gpulist; extern const struct adreno_gpulist a4xx_gpulist; @@ -133,9 +137,8 @@ err_disable_rpm: return NULL; } -static int find_chipid(struct device *dev, uint32_t *chipid) +static int find_chipid(struct device_node *node, uint32_t *chipid) { - struct device_node *node = dev->of_node; const char *compat; int ret; @@ -169,15 +172,36 @@ static int find_chipid(struct device *dev, uint32_t *chipid) /* and if that fails, fall back to legacy "qcom,chipid" property: */ ret = of_property_read_u32(node, "qcom,chipid", chipid); if (ret) { - DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret); + DRM_ERROR("%pOF: could not parse qcom,chipid: %d\n", + node, ret); return ret; } - dev_warn(dev, "Using legacy qcom,chipid binding!\n"); + pr_warn("%pOF: Using legacy qcom,chipid binding!\n", node); return 0; } +bool adreno_has_gpu(struct device_node *node) +{ + const struct adreno_info *info; + uint32_t chip_id; + int ret; + + ret = find_chipid(node, &chip_id); + if (ret) + return false; + + info = adreno_info(chip_id); + if (!info) { + pr_warn("%pOF: Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n", + node, ADRENO_CHIPID_ARGS(chip_id)); + return false; + } + + return true; +} + static int adreno_bind(struct device *dev, struct device *master, void *data) { static struct adreno_platform_config config = {}; @@ -187,19 +211,18 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) struct msm_gpu *gpu; int ret; - ret = find_chipid(dev, &config.chip_id); - if (ret) + ret = find_chipid(dev->of_node, &config.chip_id); + /* We shouldn't have gotten this far if we can't parse the chip_id */ + if (WARN_ON(ret)) return ret; dev->platform_data = &config; priv->gpu_pdev = to_platform_device(dev); info = adreno_info(config.chip_id); - if (!info) { - dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n", - 
ADRENO_CHIPID_ARGS(config.chip_id)); + /* We shouldn't have gotten this far if we don't recognize the GPU: */ + if (WARN_ON(!info)) return -ENXIO; - } config.info = info; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 1238f3265978..86bff915c3e7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -236,34 +236,77 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu, u64 adreno_private_address_space_size(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); + const struct io_pgtable_cfg *ttbr1_cfg; if (address_space_size) return address_space_size; - if (adreno_gpu->info->address_space_size) - return adreno_gpu->info->address_space_size; + if (adreno_gpu->info->quirks & ADRENO_QUIRK_4GB_VA) + return SZ_4G; - return SZ_4G; + if (!adreno_smmu || !adreno_smmu->get_ttbr1_cfg) + return SZ_4G; + + ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); + + /* + * Userspace VM is actually using TTBR0, but both are the same size, + * with b48 (sign bit) selecting which TTBRn to use. So if IAS is + * 48, the total (kernel+user) address space size is effectively + * 49 bits. But what userspace is control of is the lower 48. + */ + return BIT(ttbr1_cfg->ias) - ADRENO_VM_START; +} + +void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + struct msm_drm_private *priv = gpu->dev->dev_private; + unsigned long flags; + + /* + * Wait until the cooldown period has passed and we would actually + * collect a crashdump to re-enable stall-on-fault. + */ + spin_lock_irqsave(&priv->fault_stall_lock, flags); + if (!priv->stall_enabled && + ktime_after(ktime_get(), priv->stall_reenable_time) && + !READ_ONCE(gpu->crashstate)) { + priv->stall_enabled = true; + + gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, true); + } + spin_unlock_irqrestore(&priv->fault_stall_lock, flags); } #define ARM_SMMU_FSR_TF BIT(1) #define ARM_SMMU_FSR_PF BIT(3) #define ARM_SMMU_FSR_EF BIT(4) +#define ARM_SMMU_FSR_SS BIT(30) int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, struct adreno_smmu_fault_info *info, const char *block, u32 scratch[4]) { + struct msm_drm_private *priv = gpu->dev->dev_private; const char *type = "UNKNOWN"; - bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); + bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) && + !READ_ONCE(gpu->crashstate); + unsigned long irq_flags; /* - * If we aren't going to be resuming later from fault_worker, then do - * it now. + * In case there is a subsequent storm of pagefaults, disable + * stall-on-fault for at least half a second. 
*/ - if (!do_devcoredump) { - gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); + spin_lock_irqsave(&priv->fault_stall_lock, irq_flags); + if (priv->stall_enabled) { + priv->stall_enabled = false; + + gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, false); } + priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500); + spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags); /* * Print a default message if we couldn't get the data from the @@ -291,16 +334,18 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, scratch[0], scratch[1], scratch[2], scratch[3]); if (do_devcoredump) { + struct msm_gpu_fault_info fault_info = {}; + /* Turn off the hangcheck timer to keep it from bothering us */ - del_timer(&gpu->hangcheck_timer); + timer_delete(&gpu->hangcheck_timer); - gpu->fault_info.ttbr0 = info->ttbr0; - gpu->fault_info.iova = iova; - gpu->fault_info.flags = flags; - gpu->fault_info.type = type; - gpu->fault_info.block = block; + fault_info.ttbr0 = info->ttbr0; + fault_info.iova = iova; + fault_info.flags = flags; + fault_info.type = type; + fault_info.block = block; - kthread_queue_work(gpu->worker, &gpu->fault_work); + msm_gpu_fault_crashstate_capture(gpu, &fault_info); } return 0; @@ -883,6 +928,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ"); drm_printf(p, " - type=%s\n", info->type); drm_printf(p, " - source=%s\n", info->block); + + /* Information extracted from what we think are the current + * pgtables. Hopefully the TTBR0 matches what we've extracted + * from the SMMU registers in smmu_info! + */ + drm_puts(p, "pgtable-fault-info:\n"); + drm_printf(p, " - ttbr0: %.16llx\n", (u64)info->pgtbl_ttbr0); + drm_printf(p, " - asid: %d\n", info->asid); + drm_printf(p, " - ptes: %.16llx %.16llx %.16llx %.16llx\n", + info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]); } drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index dcf454629ce0..bc063594a359 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -57,6 +57,7 @@ enum adreno_family { #define ADRENO_QUIRK_HAS_HW_APRIV BIT(3) #define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4) #define ADRENO_QUIRK_PREEMPTION BIT(5) +#define ADRENO_QUIRK_4GB_VA BIT(6) /* Helper for formating the chip_id in the way that userspace tools like * crashdec expect. 
@@ -104,7 +105,6 @@ struct adreno_info { union { const struct a6xx_info *a6xx; }; - u64 address_space_size; /** * @speedbins: Optional table of fuse to speedbin mappings * @@ -442,6 +442,11 @@ static inline int adreno_is_a621(const struct adreno_gpu *gpu) return gpu->info->chip_ids[0] == 0x06020100; } +static inline int adreno_is_a623(const struct adreno_gpu *gpu) +{ + return gpu->info->chip_ids[0] == 0x06020300; +} + static inline int adreno_is_a630(const struct adreno_gpu *gpu) { return adreno_is_revn(gpu, 630); @@ -573,6 +578,8 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu) adreno_is_a740_family(gpu); } +/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */ +#define ADRENO_VM_START 0x100000000ULL u64 adreno_private_address_space_size(struct msm_gpu *gpu); int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, uint32_t param, uint64_t *value, uint32_t *len); @@ -629,6 +636,8 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, struct adreno_smmu_fault_info *info, const char *block, u32 scratch[4]); +void adreno_check_and_reenable_stall(struct adreno_gpu *gpu); + int adreno_read_speedbin(struct device *dev, u32 *speedbin); /* diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h index bcb39807fe61..ffc4d4257ae5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h @@ -27,17 +27,16 @@ static const struct dpu_mdp_cfg sm8650_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg sm8650_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x15000, .len = 0x1000, - .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = CTL_SM8550_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x16000, .len = 0x1000, - .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = CTL_SM8550_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -343,8 +342,8 @@ static const struct dpu_wb_cfg sm8650_wb[] = { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, .features = WB_SM8250_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, .vbif_idx = VBIF_RT, .maxlinewidth = 4096, @@ -452,6 +451,7 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = { .mdss_ver = &sm8650_mdss_ver, .caps = &sm8650_dpu_caps, .mdp = &sm8650_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm8650_ctl), .ctl = sm8650_ctl, .sspp_count = ARRAY_SIZE(sm8650_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h index ab3dfb0b374e..39027a21c6fe 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h @@ -100,14 +100,12 @@ static const struct dpu_pingpong_cfg msm8937_pp[] = { { .name = "pingpong_0", .id = PINGPONG_0, .base = 0x70000, .len = 0xd4, - .features = PINGPONG_MSM8996_MASK, .sblk = &msm8996_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), }, { .name = "pingpong_1", .id = PINGPONG_1, .base = 0x70800, .len = 0xd4, - .features = PINGPONG_MSM8996_MASK, .sblk = &msm8996_pp_sblk, 
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), @@ -132,7 +130,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, { .name = "intf_2", .id = INTF_2, .base = 0x6b000, .len = 0x268, @@ -141,7 +138,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), - .intr_tear_rd_ptr = -1, }, }; @@ -190,6 +186,7 @@ const struct dpu_mdss_cfg dpu_msm8937_cfg = { .mdss_ver = &msm8937_mdss_ver, .caps = &msm8937_dpu_caps, .mdp = msm8937_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(msm8937_ctl), .ctl = msm8937_ctl, .sspp_count = ARRAY_SIZE(msm8937_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h index 6bdaecca6761..8d1b43ea1663 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h @@ -93,7 +93,6 @@ static const struct dpu_pingpong_cfg msm8917_pp[] = { { .name = "pingpong_0", .id = PINGPONG_0, .base = 0x70000, .len = 0xd4, - .features = PINGPONG_MSM8996_MASK, .sblk = &msm8996_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), @@ -118,7 +117,6 @@ static const struct dpu_intf_cfg msm8917_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, }; @@ -167,6 +165,7 @@ const struct dpu_mdss_cfg dpu_msm8917_cfg = { .mdss_ver = &msm8917_mdss_ver, .caps = &msm8917_dpu_caps, .mdp = msm8917_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(msm8917_ctl), .ctl = msm8917_ctl, .sspp_count = ARRAY_SIZE(msm8917_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h index 14f36ea6ad0e..16c12499b24b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h @@ -100,14 +100,12 @@ static const struct dpu_pingpong_cfg msm8953_pp[] = { { .name = "pingpong_0", .id = PINGPONG_0, .base = 0x70000, .len = 0xd4, - .features = PINGPONG_MSM8996_MASK, .sblk = &msm8996_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), }, { .name = "pingpong_1", .id = PINGPONG_1, .base = 0x70800, .len = 0xd4, - .features = PINGPONG_MSM8996_MASK, .sblk = &msm8996_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), @@ -131,7 +129,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - .intr_tear_rd_ptr = -1, }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x268, @@ -140,7 +137,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, { .name = "intf_2", .id = INTF_2, .base = 0x6b000, .len = 0x268, @@ -149,7 +145,6 @@ static const struct dpu_intf_cfg 
msm8953_intf[] = { .prog_fetch_lines_worst_case = 14, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), - .intr_tear_rd_ptr = -1, }, }; @@ -198,6 +193,7 @@ const struct dpu_mdss_cfg dpu_msm8953_cfg = { .mdss_ver = &msm8953_mdss_ver, .caps = &msm8953_dpu_caps, .mdp = msm8953_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(msm8953_ctl), .ctl = msm8953_ctl, .sspp_count = ARRAY_SIZE(msm8953_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h index 491f6f5827d1..91f514d28ac6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h @@ -181,15 +181,15 @@ static const struct dpu_pingpong_cfg msm8996_pp[] = { { .name = "pingpong_0", .id = PINGPONG_0, .base = 0x70000, .len = 0xd4, - .features = PINGPONG_MSM8996_TE2_MASK, - .sblk = &msm8996_pp_sblk_te, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), }, { .name = "pingpong_1", .id = PINGPONG_1, .base = 0x70800, .len = 0xd4, - .features = PINGPONG_MSM8996_TE2_MASK, - .sblk = &msm8996_pp_sblk_te, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), }, { @@ -241,7 +241,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = { .prog_fetch_lines_worst_case = 25, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - .intr_tear_rd_ptr = -1, }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x268, @@ -250,7 +249,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = { .prog_fetch_lines_worst_case = 25, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, { .name = "intf_2", .id = INTF_2, .base = 0x6b000, .len = 0x268, @@ -259,7 +257,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = { .prog_fetch_lines_worst_case = 25, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), - .intr_tear_rd_ptr = -1, }, { .name = "intf_3", .id = INTF_3, .base = 0x6b800, .len = 0x268, @@ -267,7 +264,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = { .prog_fetch_lines_worst_case = 25, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), - .intr_tear_rd_ptr = -1, }, }; @@ -316,6 +312,7 @@ const struct dpu_mdss_cfg dpu_msm8996_cfg = { .mdss_ver = &msm8996_mdss_ver, .caps = &msm8996_dpu_caps, .mdp = msm8996_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(msm8996_ctl), .ctl = msm8996_ctl, .sspp_count = ARRAY_SIZE(msm8996_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h index 64c94e919a69..413cd59dc0c4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h @@ -170,15 +170,15 @@ static const struct dpu_pingpong_cfg msm8998_pp[] = { { .name = "pingpong_0", .id = PINGPONG_0, .base = 0x70000, .len = 0xd4, - .features = PINGPONG_SDM845_TE2_MASK, - .sblk = &sdm845_pp_sblk_te, + .features = PINGPONG_SDM845_MASK, + .sblk = &sdm845_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 
12), }, { .name = "pingpong_1", .id = PINGPONG_1, .base = 0x70800, .len = 0xd4, - .features = PINGPONG_SDM845_TE2_MASK, - .sblk = &sdm845_pp_sblk_te, + .features = PINGPONG_SDM845_MASK, + .sblk = &sdm845_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), }, { @@ -302,6 +302,7 @@ const struct dpu_mdss_cfg dpu_msm8998_cfg = { .mdss_ver = &msm8998_mdss_ver, .caps = &msm8998_dpu_caps, .mdp = &msm8998_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(msm8998_ctl), .ctl = msm8998_ctl, .sspp_count = ARRAY_SIZE(msm8998_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h index 424815e7fb7d..b2eb7ca699e3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h @@ -141,15 +141,15 @@ static const struct dpu_pingpong_cfg sdm660_pp[] = { { .name = "pingpong_0", .id = PINGPONG_0, .base = 0x70000, .len = 0xd4, - .features = PINGPONG_SDM845_TE2_MASK, - .sblk = &sdm845_pp_sblk_te, + .features = PINGPONG_SDM845_MASK, + .sblk = &sdm845_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), }, { .name = "pingpong_1", .id = PINGPONG_1, .base = 0x70800, .len = 0xd4, - .features = PINGPONG_SDM845_TE2_MASK, - .sblk = &sdm845_pp_sblk_te, + .features = PINGPONG_SDM845_MASK, + .sblk = &sdm845_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), }, { @@ -202,7 +202,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - .intr_tear_rd_ptr = -1, }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x280, @@ -211,7 +210,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, { .name = "intf_2", .id = INTF_2, .base = 0x6b000, .len = 0x280, @@ -220,7 +218,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), - .intr_tear_rd_ptr = -1, }, }; @@ -269,6 +266,7 @@ const struct dpu_mdss_cfg dpu_sdm660_cfg = { .mdss_ver = &sdm660_mdss_ver, .caps = &sdm660_dpu_caps, .mdp = &sdm660_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(sdm660_ctl), .ctl = sdm660_ctl, .sspp_count = ARRAY_SIZE(sdm660_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h index df01227fc364..85e121ad84a0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h @@ -115,14 +115,14 @@ static const struct dpu_pingpong_cfg sdm630_pp[] = { { .name = "pingpong_0", .id = PINGPONG_0, .base = 0x70000, .len = 0xd4, - .features = PINGPONG_SDM845_TE2_MASK, - .sblk = &sdm845_pp_sblk_te, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sdm845_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), }, { .name = "pingpong_2", .id = PINGPONG_2, .base = 0x71000, .len = 0xd4, - .features = PINGPONG_SDM845_MASK, + .features = BIT(DPU_PINGPONG_DITHER), .sblk = &sdm845_pp_sblk, .intr_done = 
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14), @@ -147,7 +147,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), - .intr_tear_rd_ptr = -1, }, { .name = "intf_1", .id = INTF_1, .base = 0x6a800, .len = 0x280, @@ -156,7 +155,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = { .prog_fetch_lines_worst_case = 21, .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), - .intr_tear_rd_ptr = -1, }, }; @@ -205,6 +203,7 @@ const struct dpu_mdss_cfg dpu_sdm630_cfg = { .mdss_ver = &sdm630_mdss_ver, .caps = &sdm630_dpu_caps, .mdp = &sdm630_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(sdm630_ctl), .ctl = sdm630_ctl, .sspp_count = ARRAY_SIZE(sdm630_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h index 72bd4f7e9e50..49363d7d5b93 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h @@ -194,15 +194,15 @@ static const struct dpu_pingpong_cfg sdm845_pp[] = { { .name = "pingpong_0", .id = PINGPONG_0, .base = 0x70000, .len = 0xd4, - .features = PINGPONG_SDM845_TE2_MASK, - .sblk = &sdm845_pp_sblk_te, + .features = PINGPONG_SDM845_MASK, + .sblk = &sdm845_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), }, { .name = "pingpong_1", .id = PINGPONG_1, .base = 0x70800, .len = 0xd4, - .features = PINGPONG_SDM845_TE2_MASK, - .sblk = &sdm845_pp_sblk_te, + .features = PINGPONG_SDM845_MASK, + .sblk = &sdm845_pp_sblk, .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), }, { @@ -319,6 +319,7 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = { .mdss_ver = &sdm845_mdss_ver, .caps = &sdm845_dpu_caps, .mdp = &sdm845_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(sdm845_ctl), .ctl = sdm845_ctl, .sspp_count = ARRAY_SIZE(sdm845_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h index daef07924886..c2fde980fb52 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h @@ -132,6 +132,7 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = { .mdss_ver = &sdm670_mdss_ver, .caps = &sdm845_dpu_caps, .mdp = &sdm670_mdp, + .cdm = &dpu_cdm_1_x_4_x, .ctl_count = ARRAY_SIZE(sdm845_ctl), .ctl = sdm845_ctl, .sspp_count = ARRAY_SIZE(sdm670_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index 421afacb7248..08d38e1d420c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -37,17 +37,16 @@ static const struct dpu_mdp_cfg sm8150_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg sm8150_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x1000, .len = 0x1e0, - .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x1200, .len = 0x1e0, - .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = 
BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 4, .type = SSPP_TYPE_VIG, @@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_2", .id = SSPP_VIG2, .base = 0x8000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 8, .type = SSPP_TYPE_VIG, @@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_3", .id = SSPP_VIG3, .base = 0xa000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 12, .type = SSPP_TYPE_VIG, @@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, @@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { }, { .name = "sspp_11", .id = SSPP_DMA3, .base = 0x2a000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 13, .type = SSPP_TYPE_DMA, @@ -297,9 +296,9 @@ static const struct dpu_wb_cfg sm8150_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, .vbif_idx = VBIF_RT, @@ -388,6 +387,7 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = { .mdss_ver = &sm8150_mdss_ver, .caps = &sm8150_dpu_caps, .mdp = &sm8150_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm8150_ctl), .ctl = sm8150_ctl, .sspp_count = ARRAY_SIZE(sm8150_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h index 641023b102bf..d6f8b1030c68 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h @@ -41,12 +41,12 @@ static const struct dpu_ctl_cfg sc8180x_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x1000, .len = 0x1e0, - .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = 
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x1200, .len = 0x1e0, - .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 4, .type = SSPP_TYPE_VIG, @@ -91,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_2", .id = SSPP_VIG2, .base = 0x8000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 8, .type = SSPP_TYPE_VIG, @@ -99,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_3", .id = SSPP_VIG3, .base = 0xa000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 12, .type = SSPP_TYPE_VIG, @@ -107,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -115,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -123,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, @@ -131,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = { }, { .name = "sspp_11", .id = SSPP_DMA3, .base = 0x2a000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 13, .type = SSPP_TYPE_DMA, @@ -304,9 +304,9 @@ static const struct dpu_wb_cfg sc8180x_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, .vbif_idx = VBIF_RT, @@ -414,6 +414,7 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = { .mdss_ver = &sc8180x_mdss_ver, .caps = &sc8180x_dpu_caps, .mdp = &sc8180x_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sc8180x_ctl), .ctl = sc8180x_ctl, .sspp_count = ARRAY_SIZE(sc8180x_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h index 2fe674d1e059..71ba48b05656 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h @@ -38,12 +38,12 @@ static const struct dpu_ctl_cfg sm7150_ctl[] = { { 
.name = "ctl_0", .id = CTL_0, .base = 0x1000, .len = 0x1e0, - .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x1200, .len = 0x1e0, - .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -72,7 +72,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_NO_SDMA, .sblk = &dpu_vig_sblk_qseed3_2_4, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -80,7 +80,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = { }, { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_NO_SDMA, .sblk = &dpu_vig_sblk_qseed3_2_4, .xin_id = 4, .type = SSPP_TYPE_VIG, @@ -88,7 +88,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = { }, { .name = "sspp_2", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -96,7 +96,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -104,7 +104,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = { }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1f0, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, @@ -261,8 +261,8 @@ static const struct dpu_wb_cfg sm7150_wb[] = { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, .features = WB_SM8250_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, .vbif_idx = VBIF_RT, @@ -309,6 +309,7 @@ const struct dpu_mdss_cfg dpu_sm7150_cfg = { .mdss_ver = &sm7150_mdss_ver, .caps = &sm7150_dpu_caps, .mdp = &sm7150_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm7150_ctl), .ctl = sm7150_ctl, .sspp_count = ARRAY_SIZE(sm7150_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h index 621a2140f675..da11830d4407 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h @@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6150_mdp = { [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, }, }; @@ -116,14 +117,12 @@ static const struct dpu_lm_cfg sm6150_lm[] = { .sblk = &sdm845_lm_sblk, .pingpong = PINGPONG_0, .dspp = DSPP_0, - .lm_pair = LM_1, }, { .name = "lm_1", .id = LM_1, .base = 0x45000, .len = 0x320, .features = MIXER_QCM2290_MASK, .sblk = &sdm845_lm_sblk, .pingpong = PINGPONG_1, - .lm_pair = LM_0, }, { .name = "lm_2", .id = LM_2, .base = 0x46000, .len = 0x320, @@ -164,6 +163,21 @@ static const struct dpu_pingpong_cfg sm6150_pp[] = { }, }; 
+static const struct dpu_wb_cfg sm6150_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), + .clk_ctrl = DPU_CLK_CTRL_WB2, + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 2160, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + static const struct dpu_intf_cfg sm6150_intf[] = { { .name = "intf_0", .id = INTF_0, @@ -234,6 +248,7 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = { .mdss_ver = &sm6150_mdss_ver, .caps = &sm6150_dpu_caps, .mdp = &sm6150_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm6150_ctl), .ctl = sm6150_ctl, .sspp_count = ARRAY_SIZE(sm6150_sspp), @@ -244,6 +259,8 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = { .dspp = sm6150_dspp, .pingpong_count = ARRAY_SIZE(sm6150_pp), .pingpong = sm6150_pp, + .wb_count = ARRAY_SIZE(sm6150_wb), + .wb = sm6150_wb, .intf_count = ARRAY_SIZE(sm6150_intf), .intf = sm6150_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h index d039b96beb97..fcfb3774f7a1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h @@ -69,7 +69,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f0, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_NO_SDMA, .sblk = &dpu_vig_sblk_qseed3_2_4, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1f0, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -144,9 +144,9 @@ static const struct dpu_wb_cfg sm6125_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, .vbif_idx = VBIF_RT, @@ -216,6 +216,7 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = { .mdss_ver = &sm6125_mdss_ver, .caps = &sm6125_dpu_caps, .mdp = &sm6125_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm6125_ctl), .ctl = sm6125_ctl, .sspp_count = ARRAY_SIZE(sm6125_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h index e8916ae826a6..a86fdb33ebdd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h @@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sm8250_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg sm8250_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x1000, .len = 0x1e0, - .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = 
"ctl_1", .id = CTL_1, .base = 0x1200, .len = 0x1e0, - .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -386,7 +385,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = { .mdss_ver = &sm8250_mdss_ver, .caps = &sm8250_dpu_caps, .mdp = &sm8250_mdp, - .cdm = &sc7280_cdm, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm8250_ctl), .ctl = sm8250_ctl, .sspp_count = ARRAY_SIZE(sm8250_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h index 7382ebb6e5b2..842fcc5887fe 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h @@ -51,7 +51,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f8, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_NO_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_0, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -59,7 +59,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f8, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -67,7 +67,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1f8, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = { }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1f8, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, @@ -157,8 +157,8 @@ static const struct dpu_wb_cfg sc7180_wb[] = { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, .features = WB_SM8250_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, .vbif_idx = VBIF_RT, @@ -204,6 +204,7 @@ const struct dpu_mdss_cfg dpu_sc7180_cfg = { .mdss_ver = &sc7180_mdss_ver, .caps = &sc7180_dpu_caps, .mdp = &sc7180_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sc7180_ctl), .ctl = sc7180_ctl, .sspp_count = ARRAY_SIZE(sc7180_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h index 43f64a005f5a..c5fd89dd7c89 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h @@ -38,7 +38,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f8, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_NO_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_0, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -46,7 +46,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f8, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h index 
0502cee2f116..a234bb289d24 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h @@ -59,7 +59,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f8, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_NO_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_0, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -67,7 +67,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f8, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1f8, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = { }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1f8, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, @@ -151,8 +151,8 @@ static const struct dpu_wb_cfg sm6350_wb[] = { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, .features = WB_SM8250_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, .vbif_idx = VBIF_RT, @@ -222,6 +222,7 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = { .mdss_ver = &sm6350_mdss_ver, .caps = &sm6350_dpu_caps, .mdp = &sm6350_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm6350_ctl), .ctl = sm6350_ctl, .sspp_count = ARRAY_SIZE(sm6350_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h index 3cbb2fe8aba2..53f3be28f6f6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h @@ -46,7 +46,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f8, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h index a06c8634d2d7..3a3bc8e429be 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h @@ -39,7 +39,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f8, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_NO_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_0, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -47,7 +47,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1f8, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_NO_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h index f7c08e89c882..90e86063a372 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h +++ 
b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h @@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sm8350_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg sm8350_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x15000, .len = 0x1e8, - .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .features = CTL_SC7280_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x16000, .len = 0x1e8, - .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .features = CTL_SC7280_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -305,8 +304,8 @@ static const struct dpu_wb_cfg sm8350_wb[] = { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, .features = WB_SM8250_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, .vbif_idx = VBIF_RT, @@ -396,6 +395,7 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = { .mdss_ver = &sm8350_mdss_ver, .caps = &sm8350_dpu_caps, .mdp = &sm8350_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm8350_ctl), .ctl = sm8350_ctl, .sspp_count = ARRAY_SIZE(sm8350_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h index 2f153e0b5c6a..e9625c48c567 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h @@ -248,7 +248,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = { .mdss_ver = &sc7280_mdss_ver, .caps = &sc7280_dpu_caps, .mdp = &sc7280_mdp, - .cdm = &sc7280_cdm, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sc7280_ctl), .ctl = sc7280_ctl, .sspp_count = ARRAY_SIZE(sc7280_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h index 0d143e390eca..139f11321fea 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h @@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sc8280xp_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg sc8280xp_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x15000, .len = 0x204, - .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .features = CTL_SC7280_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x16000, .len = 0x204, - .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .features = CTL_SC7280_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -74,7 +73,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x2ac, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_0, .xin_id = 0, .type = SSPP_TYPE_VIG, @@ -82,7 +81,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = { }, { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x2ac, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_0, .xin_id = 4, .type = SSPP_TYPE_VIG, @@ -90,7 +89,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = { }, { .name = "sspp_2", .id = SSPP_VIG2, .base = 
0x8000, .len = 0x2ac, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_0, .xin_id = 8, .type = SSPP_TYPE_VIG, @@ -98,7 +97,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = { }, { .name = "sspp_3", .id = SSPP_VIG3, .base = 0xa000, .len = 0x2ac, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_0, .xin_id = 12, .type = SSPP_TYPE_VIG, @@ -106,7 +105,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = { }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x2ac, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, @@ -114,7 +113,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = { }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x2ac, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, @@ -122,7 +121,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = { }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x2ac, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, @@ -130,7 +129,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = { }, { .name = "sspp_11", .id = SSPP_DMA3, .base = 0x2a000, .len = 0x2ac, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 13, .type = SSPP_TYPE_DMA, @@ -435,6 +434,7 @@ const struct dpu_mdss_cfg dpu_sc8280xp_cfg = { .mdss_ver = &sc8280xp_mdss_ver, .caps = &sc8280xp_dpu_caps, .mdp = &sc8280xp_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sc8280xp_ctl), .ctl = sc8280xp_ctl, .sspp_count = ARRAY_SIZE(sc8280xp_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h index 08742472f9cc..461294143a90 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h @@ -36,17 +36,16 @@ static const struct dpu_mdp_cfg sm8450_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg sm8450_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x15000, .len = 0x204, - .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .features = CTL_SC7280_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x16000, .len = 0x204, - .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .features = CTL_SC7280_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -321,8 +320,8 @@ static const struct dpu_wb_cfg sm8450_wb[] = { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, .features = WB_SM8250_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .clk_ctrl = DPU_CLK_CTRL_WB2, .xin_id = 6, .vbif_idx = VBIF_RT, @@ -412,6 +411,7 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = { .mdss_ver = &sm8450_mdss_ver, .caps = &sm8450_dpu_caps, .mdp = &sm8450_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm8450_ctl), .ctl = sm8450_ctl, .sspp_count = ARRAY_SIZE(sm8450_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h index 
76ec72a32378..c248b3b55c41 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h @@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sa8775p_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg sa8775p_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x15000, .len = 0x204, - .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .features = CTL_SC7280_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x16000, .len = 0x204, - .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .features = CTL_SC7280_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -458,7 +457,7 @@ const struct dpu_mdss_cfg dpu_sa8775p_cfg = { .mdss_ver = &sa8775p_mdss_ver, .caps = &sa8775p_dpu_caps, .mdp = &sa8775p_mdp, - .cdm = &sc7280_cdm, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sa8775p_ctl), .ctl = sa8775p_ctl, .sspp_count = ARRAY_SIZE(sa8775p_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h index 4d3787fceb72..59c7fdf28e89 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h @@ -27,17 +27,16 @@ static const struct dpu_mdp_cfg sm8550_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg sm8550_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x15000, .len = 0x290, - .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = CTL_SM8550_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x16000, .len = 0x290, - .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = CTL_SM8550_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -66,70 +65,70 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x344, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_2, .xin_id = 0, .type = SSPP_TYPE_VIG, }, { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x344, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_2, .xin_id = 4, .type = SSPP_TYPE_VIG, }, { .name = "sspp_2", .id = SSPP_VIG2, .base = 0x8000, .len = 0x344, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_2, .xin_id = 8, .type = SSPP_TYPE_VIG, }, { .name = "sspp_3", .id = SSPP_VIG3, .base = 0xa000, .len = 0x344, - .features = VIG_SDM845_MASK, + .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_3_2, .xin_id = 12, .type = SSPP_TYPE_VIG, }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x344, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x344, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x344, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, }, { .name = 
"sspp_11", .id = SSPP_DMA3, .base = 0x2a000, .len = 0x344, - .features = DMA_SDM845_MASK, + .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 13, .type = SSPP_TYPE_DMA, }, { .name = "sspp_12", .id = SSPP_DMA4, .base = 0x2c000, .len = 0x344, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 14, .type = SSPP_TYPE_DMA, }, { .name = "sspp_13", .id = SSPP_DMA5, .base = 0x2e000, .len = 0x344, - .features = DMA_CURSOR_SDM845_MASK, + .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 15, .type = SSPP_TYPE_DMA, @@ -317,8 +316,8 @@ static const struct dpu_wb_cfg sm8550_wb[] = { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, .features = WB_SM8250_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, .vbif_idx = VBIF_RT, .maxlinewidth = 4096, @@ -407,6 +406,7 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = { .mdss_ver = &sm8550_mdss_ver, .caps = &sm8550_dpu_caps, .mdp = &sm8550_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(sm8550_ctl), .ctl = sm8550_ctl, .sspp_count = ARRAY_SIZE(sm8550_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h new file mode 100644 index 000000000000..5667d055fbd1 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h @@ -0,0 +1,433 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_9_1_SAR2130P_H +#define _DPU_9_1_SAR2130P_H + +static const struct dpu_caps sar2130p_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 5120, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_mdp_cfg sar2130p_mdp = { + .name = "top_0", + .base = 0, .len = 0x494, + .features = BIT(DPU_MDP_PERIPH_0_REMOVED), + .clk_ctrls = { + [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, + }, +}; + +static const struct dpu_ctl_cfg sar2130p_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x15000, .len = 0x290, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x16000, .len = 0x290, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x17000, .len = 0x290, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x18000, .len = 0x290, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x19000, .len = 0x290, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, { + .name = "ctl_5", .id = CTL_5, + .base = 0x1a000, .len = 0x290, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23), + }, +}; + +static const struct dpu_sspp_cfg sar2130p_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_2, + .xin_id = 0, 
+ .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_2, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_2, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_2, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 1, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 5, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_10", .id = SSPP_DMA2, + .base = 0x28000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 9, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_11", .id = SSPP_DMA3, + .base = 0x2a000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 13, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_12", .id = SSPP_DMA4, + .base = 0x2c000, .len = 0x344, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 14, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_13", .id = SSPP_DMA5, + .base = 0x2e000, .len = 0x344, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 15, + .type = SSPP_TYPE_DMA, + }, +}; + +static const struct dpu_lm_cfg sar2130p_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x320, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_3, + .pingpong = PINGPONG_2, + .dspp = DSPP_2, + }, { + .name = "lm_3", .id = LM_3, + .base = 0x47000, .len = 0x320, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + .dspp = DSPP_3, + }, { + .name = "lm_4", .id = LM_4, + .base = 0x48000, .len = 0x320, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_4, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x320, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_4, + .pingpong = PINGPONG_5, + }, +}; + +static const struct dpu_dspp_cfg sar2130p_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_2", .id = DSPP_2, + .base = 0x58000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_3", .id = DSPP_3, + .base = 0x5a000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, +}; +static const struct dpu_pingpong_cfg sar2130p_pp[] = { + { + .name = 
"pingpong_0", .id = PINGPONG_0, + .base = 0x69000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x6a000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x6b000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + }, { + .name = "pingpong_3", .id = PINGPONG_3, + .base = 0x6c000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + }, { + .name = "pingpong_4", .id = PINGPONG_4, + .base = 0x6d000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + }, { + .name = "pingpong_5", .id = PINGPONG_5, + .base = 0x6e000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), + }, { + .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0, + .base = 0x66000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, { + .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1, + .base = 0x66400, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, +}; + +static const struct dpu_merge_3d_cfg sar2130p_merge_3d[] = { + { + .name = "merge_3d_0", .id = MERGE_3D_0, + .base = 0x4e000, .len = 0x8, + }, { + .name = "merge_3d_1", .id = MERGE_3D_1, + .base = 0x4f000, .len = 0x8, + }, { + .name = "merge_3d_2", .id = MERGE_3D_2, + .base = 0x50000, .len = 0x8, + }, { + .name = "merge_3d_3", .id = MERGE_3D_3, + .base = 0x66700, .len = 0x8, + }, +}; + +/* + * NOTE: Each display compression engine (DCE) contains dual hard + * slice DSC encoders so both share same base address but with + * its own different sub block address. 
+ */ +static const struct dpu_dsc_cfg sar2130p_dsc[] = { + { + .name = "dce_0_0", .id = DSC_0, + .base = 0x80000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_0_1", .id = DSC_1, + .base = 0x80000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_1_0", .id = DSC_2, + .base = 0x81000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_1_1", .id = DSC_3, + .base = 0x81000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_1, + }, +}; + +static const struct dpu_wb_cfg sar2130p_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + +static const struct dpu_intf_cfg sar2130p_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x34000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x35000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2), + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x36000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2), + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x37000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + }, +}; + +static const struct dpu_perf_cfg sar2130p_perf_data = { + .max_bw_low = 13600000, + .max_bw_high = 18200000, + .min_core_ib = 2500000, + .min_llcc_ib = 0, + .min_dram_ib = 800000, + .min_prefill_lines = 35, + /* FIXME: lut tables */ + .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0}, + .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc7180_qos_linear), + .entries = sc7180_qos_linear + }, + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 0, .wr_enable = 0}, + {.rd_enable = 0, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version sar2130p_mdss_ver = { + .core_major_ver = 9, + .core_minor_ver = 1, +}; + +const struct dpu_mdss_cfg dpu_sar2130p_cfg = { + .mdss_ver = &sar2130p_mdss_ver, + .caps = &sar2130p_dpu_caps, + .mdp = &sar2130p_mdp, + .cdm = &dpu_cdm_5_x, + .ctl_count = 
ARRAY_SIZE(sar2130p_ctl), + .ctl = sar2130p_ctl, + .sspp_count = ARRAY_SIZE(sar2130p_sspp), + .sspp = sar2130p_sspp, + .mixer_count = ARRAY_SIZE(sar2130p_lm), + .mixer = sar2130p_lm, + .dspp_count = ARRAY_SIZE(sar2130p_dspp), + .dspp = sar2130p_dspp, + .pingpong_count = ARRAY_SIZE(sar2130p_pp), + .pingpong = sar2130p_pp, + .dsc_count = ARRAY_SIZE(sar2130p_dsc), + .dsc = sar2130p_dsc, + .merge_3d_count = ARRAY_SIZE(sar2130p_merge_3d), + .merge_3d = sar2130p_merge_3d, + .wb_count = ARRAY_SIZE(sar2130p_wb), + .wb = sar2130p_wb, + .intf_count = ARRAY_SIZE(sar2130p_intf), + .intf = sar2130p_intf, + .vbif_count = ARRAY_SIZE(sm8550_vbif), + .vbif = sm8550_vbif, + .perf = &sar2130p_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h index 6b112e3d17da..52cc10aec1f9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h @@ -26,17 +26,16 @@ static const struct dpu_mdp_cfg x1e80100_mdp = { }, }; -/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ static const struct dpu_ctl_cfg x1e80100_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x15000, .len = 0x290, - .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = CTL_SM8550_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, .base = 0x16000, .len = 0x290, - .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), + .features = CTL_SM8550_MASK, .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, @@ -317,8 +316,8 @@ static const struct dpu_wb_cfg x1e80100_wb[] = { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, .features = WB_SM8250_MASK, - .format_list = wb2_formats_rgb, - .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), .xin_id = 6, .vbif_idx = VBIF_RT, .maxlinewidth = 4096, @@ -453,6 +452,7 @@ const struct dpu_mdss_cfg dpu_x1e80100_cfg = { .mdss_ver = &x1e80100_mdss_ver, .caps = &x1e80100_dpu_caps, .mdp = &x1e80100_mdp, + .cdm = &dpu_cdm_5_x, .ctl_count = ARRAY_SIZE(x1e80100_ctl), .ctl = x1e80100_ctl, .sspp_count = ARRAY_SIZE(x1e80100_sspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 6f0a37f954fe..0fb5789c60d0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -118,26 +118,38 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf, return; } - memset(perf, 0, sizeof(struct dpu_core_perf_params)); - - if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) { - perf->bw_ctl = 0; - perf->max_per_pipe_ib = 0; - perf->core_clk_rate = 0; - } else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) { - perf->bw_ctl = core_perf->fix_core_ab_vote; - perf->max_per_pipe_ib = core_perf->fix_core_ib_vote; - perf->core_clk_rate = core_perf->fix_core_clk_rate; - } else { - perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc); - perf->max_per_pipe_ib = perf_cfg->min_dram_ib; - perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state); - } - + perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc); + perf->max_per_pipe_ib = perf_cfg->min_dram_ib; + perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state); DRM_DEBUG_ATOMIC( - "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n", + "crtc=%d 
clk_rate=%llu core_ib=%u core_ab=%u\n", crtc->base.id, perf->core_clk_rate, - perf->max_per_pipe_ib, perf->bw_ctl); + perf->max_per_pipe_ib, + (u32)DIV_ROUND_UP_ULL(perf->bw_ctl, 1000)); +} + +static void dpu_core_perf_aggregate(struct drm_device *ddev, + enum dpu_crtc_client_type curr_client_type, + struct dpu_core_perf_params *perf) +{ + struct dpu_crtc_state *dpu_cstate; + struct drm_crtc *tmp_crtc; + + drm_for_each_crtc(tmp_crtc, ddev) { + if (tmp_crtc->enabled && + curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) { + dpu_cstate = to_dpu_crtc_state(tmp_crtc->state); + + perf->max_per_pipe_ib = max(perf->max_per_pipe_ib, + dpu_cstate->new_perf.max_per_pipe_ib); + + perf->bw_ctl += dpu_cstate->new_perf.bw_ctl; + + DRM_DEBUG_ATOMIC("crtc=%d bw=%llu\n", + tmp_crtc->base.id, + dpu_cstate->new_perf.bw_ctl); + } + } } /** @@ -150,11 +162,9 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { u32 bw, threshold; - u64 bw_sum_of_intfs = 0; - enum dpu_crtc_client_type curr_client_type; struct dpu_crtc_state *dpu_cstate; - struct drm_crtc *tmp_crtc; struct dpu_kms *kms; + struct dpu_core_perf_params perf = { 0 }; if (!crtc || !state) { DPU_ERROR("invalid crtc\n"); @@ -172,80 +182,56 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, /* obtain new values */ _dpu_core_perf_calc_crtc(&kms->perf, crtc, state, &dpu_cstate->new_perf); - bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl; - curr_client_type = dpu_crtc_get_client_type(crtc); - - drm_for_each_crtc(tmp_crtc, crtc->dev) { - if (tmp_crtc->enabled && - dpu_crtc_get_client_type(tmp_crtc) == curr_client_type && - tmp_crtc != crtc) { - struct dpu_crtc_state *tmp_cstate = - to_dpu_crtc_state(tmp_crtc->state); - - DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n", - tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl, - tmp_cstate->bw_control); + dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf); - bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl; - } - - /* convert bandwidth to kb */ - bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000); - DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw); + /* convert bandwidth to kb */ + bw = DIV_ROUND_UP_ULL(perf.bw_ctl, 1000); + DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw); - threshold = kms->perf.perf_cfg->max_bw_high; + threshold = kms->perf.perf_cfg->max_bw_high; - DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold); + DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold); - if (!threshold) { - DPU_ERROR("no bandwidth limits specified\n"); - return -E2BIG; - } else if (bw > threshold) { - DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, - threshold); - return -E2BIG; - } + if (!threshold) { + DPU_ERROR("no bandwidth limits specified\n"); + return -E2BIG; + } else if (bw > threshold) { + DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, + threshold); + return -E2BIG; } return 0; } static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, - struct drm_crtc *crtc) + struct drm_crtc *crtc) { struct dpu_core_perf_params perf = { 0 }; - enum dpu_crtc_client_type curr_client_type - = dpu_crtc_get_client_type(crtc); - struct drm_crtc *tmp_crtc; - struct dpu_crtc_state *dpu_cstate; int i, ret = 0; - u64 avg_bw; + u32 avg_bw; + u32 peak_bw; if (!kms->num_paths) return 0; - drm_for_each_crtc(tmp_crtc, crtc->dev) { - if (tmp_crtc->enabled && - curr_client_type == - dpu_crtc_get_client_type(tmp_crtc)) { - dpu_cstate = to_dpu_crtc_state(tmp_crtc->state); - - perf.max_per_pipe_ib = max(perf.max_per_pipe_ib, - 
dpu_cstate->new_perf.max_per_pipe_ib); - - perf.bw_ctl += dpu_cstate->new_perf.bw_ctl; + if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) { + avg_bw = 0; + peak_bw = 0; + } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) { + avg_bw = kms->perf.fix_core_ab_vote; + peak_bw = kms->perf.fix_core_ib_vote; + } else { + dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf); - DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n", - tmp_crtc->base.id, - dpu_cstate->new_perf.bw_ctl, kms->num_paths); - } + avg_bw = div_u64(perf.bw_ctl, 1000); /*Bps_to_icc*/ + peak_bw = perf.max_per_pipe_ib; } - avg_bw = perf.bw_ctl; - do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/ + avg_bw /= kms->num_paths; for (i = 0; i < kms->num_paths; i++) - icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib); + icc_set_bw(kms->path[i], avg_bw, peak_bw); return ret; } @@ -476,9 +462,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) &perf->core_clk_rate); debugfs_create_u32("enable_bw_release", 0600, entry, (u32 *)&perf->enable_bw_release); - debugfs_create_u32("threshold_low", 0400, entry, + debugfs_create_u32("low_core_ab", 0400, entry, (u32 *)&perf->perf_cfg->max_bw_low); - debugfs_create_u32("threshold_high", 0400, entry, + debugfs_create_u32("max_core_ab", 0400, entry, (u32 *)&perf->perf_cfg->max_bw_high); debugfs_create_u32("min_core_ib", 0400, entry, (u32 *)&perf->perf_cfg->min_core_ib); @@ -490,9 +476,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) (u32 *)perf, &dpu_core_perf_mode_fops); debugfs_create_u64("fix_core_clk_rate", 0600, entry, &perf->fix_core_clk_rate); - debugfs_create_u64("fix_core_ib_vote", 0600, entry, + debugfs_create_u32("fix_core_ib_vote", 0600, entry, &perf->fix_core_ib_vote); - debugfs_create_u64("fix_core_ab_vote", 0600, entry, + debugfs_create_u32("fix_core_ab_vote", 0600, entry, &perf->fix_core_ab_vote); return 0; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h index 451bf8021114..d2f21d34e501 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h @@ -19,7 +19,7 @@ * @core_clk_rate: core clock rate request */ struct dpu_core_perf_params { - u64 max_per_pipe_ib; + u32 max_per_pipe_ib; u64 bw_ctl; u64 core_clk_rate; }; @@ -40,8 +40,8 @@ struct dpu_core_perf_tune { * @perf_tune: debug control for performance tuning * @enable_bw_release: debug control for bandwidth release * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2 - * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2 - * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2 + * @fix_core_ib_vote: fixed core ib vote in KBps used in mode 2 + * @fix_core_ab_vote: fixed core ab vote in KBps used in mode 2 */ struct dpu_core_perf { const struct dpu_perf_cfg *perf_cfg; @@ -50,8 +50,8 @@ struct dpu_core_perf { struct dpu_core_perf_tune perf_tune; u32 enable_bw_release; u64 fix_core_clk_rate; - u64 fix_core_ib_vote; - u64 fix_core_ab_vote; + u32 fix_core_ib_vote; + u32 fix_core_ab_vote; }; int dpu_core_perf_crtc_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 7191b1a6d41b..a4b0fe0d9899 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -445,9 +445,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, uint32_t lm_idx; bool bg_alpha_enable = false; - 
DECLARE_BITMAP(fetch_active, SSPP_MAX); + DECLARE_BITMAP(active_fetch, SSPP_MAX); - memset(fetch_active, 0, sizeof(fetch_active)); + memset(active_fetch, 0, sizeof(active_fetch)); drm_atomic_crtc_for_each_plane(plane, crtc) { state = plane->state; if (!state) @@ -464,7 +464,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) bg_alpha_enable = true; - set_bit(pstate->pipe.sspp->idx, fetch_active); + set_bit(pstate->pipe.sspp->idx, active_fetch); _dpu_crtc_blend_setup_pipe(crtc, plane, mixer, cstate->num_mixers, pstate->stage, @@ -472,7 +472,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, &pstate->pipe, 0, stage_cfg); if (pstate->r_pipe.sspp) { - set_bit(pstate->r_pipe.sspp->idx, fetch_active); + set_bit(pstate->r_pipe.sspp->idx, active_fetch); _dpu_crtc_blend_setup_pipe(crtc, plane, mixer, cstate->num_mixers, pstate->stage, @@ -492,8 +492,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, } } - if (ctl->ops.set_active_pipes) - ctl->ops.set_active_pipes(ctl, fetch_active); + if (ctl->ops.set_active_fetch_pipes) + ctl->ops.set_active_fetch_pipes(ctl, active_fetch); _dpu_crtc_program_lm_output_roi(crtc); } @@ -519,6 +519,8 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) if (mixer[i].lm_ctl->ops.clear_all_blendstages) mixer[i].lm_ctl->ops.clear_all_blendstages( mixer[i].lm_ctl); + if (mixer[i].lm_ctl->ops.set_active_fetch_pipes) + mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL); } /* initialize stage cfg */ @@ -953,6 +955,45 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) return rc; } +static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL; + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + + /* Find encoder for real time display */ + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) { + if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) + wb_encoder = encoder; + else + rt_encoder = encoder; + } + + if (!rt_encoder || !wb_encoder) { + DRM_DEBUG_ATOMIC("real time or wb encoder not found\n"); + return -EINVAL; + } + + dpu_encoder_prepare_for_kickoff(wb_encoder); + dpu_encoder_prepare_for_kickoff(rt_encoder); + + dpu_vbif_clear_errors(dpu_kms); + + /* + * Kickoff real time encoder last as it's the encoder that + * will do the flush + */ + dpu_encoder_kickoff(wb_encoder); + dpu_encoder_kickoff(rt_encoder); + + /* Don't start frame done timers until the kickoffs have finished */ + dpu_encoder_start_frame_done_timer(wb_encoder); + dpu_encoder_start_frame_done_timer(rt_encoder); + + return 0; +} + /** * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc * @crtc: Pointer to drm crtc object @@ -981,13 +1022,27 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) goto end; } } - /* - * Encoder will flush/start now, unless it has a tx pending. If so, it - * may delay and flush at an irq event (e.g. ppdone) - */ - drm_for_each_encoder_mask(encoder, crtc->dev, - crtc->state->encoder_mask) - dpu_encoder_prepare_for_kickoff(encoder); + + if (drm_crtc_in_clone_mode(crtc->state)) { + if (dpu_crtc_kickoff_clone_mode(crtc)) + goto end; + } else { + /* + * Encoder will flush/start now, unless it has a tx pending. + * If so, it may delay and flush at an irq event (e.g. 
ppdone) + */ + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) + dpu_encoder_prepare_for_kickoff(encoder); + + dpu_vbif_clear_errors(dpu_kms); + + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) { + dpu_encoder_kickoff(encoder); + dpu_encoder_start_frame_done_timer(encoder); + } + } if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) { /* acquire bandwidth and other resources */ @@ -997,11 +1052,6 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) dpu_crtc->play_count++; - dpu_vbif_clear_errors(dpu_kms); - - drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) - dpu_encoder_kickoff(encoder); - reinit_completion(&dpu_crtc->frame_done_comp); end: @@ -1228,6 +1278,149 @@ static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state done: kfree(states); return ret; +} + +#define MAX_CHANNELS_PER_CRTC 2 +#define MAX_HDISPLAY_SPLIT 1080 + +static struct msm_display_topology dpu_crtc_get_topology( + struct drm_crtc *crtc, + struct dpu_kms *dpu_kms, + struct drm_crtc_state *crtc_state) +{ + struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct msm_display_topology topology = {0}; + struct drm_encoder *drm_enc; + + drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) + dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state, + &crtc_state->adjusted_mode); + + topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state); + + /* + * Datapath topology selection + * + * Dual display + * 2 LM, 2 INTF ( Split display using 2 interfaces) + * + * Single display + * 1 LM, 1 INTF + * 2 LM, 1 INTF (stream merge to support high resolution interfaces) + * + * If DSC is enabled, use 2 LMs for 2:2:1 topology + * + * Add dspps to the reservation requirements if ctm is requested + * + * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not + * enabled. This is because in cases where CWB is enabled, num_intf will + * count both the WB and real-time phys encoders. + * + * For non-DSC CWB usecases, have the num_lm be decided by the + * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check. + */ + + if (topology.num_intf == 2 && !topology.cwb_enabled) + topology.num_lm = 2; + else if (topology.num_dsc == 2) + topology.num_lm = 2; + else if (dpu_kms->catalog->caps->has_3d_merge) + topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 
2 : 1; + else + topology.num_lm = 1; + + if (crtc_state->ctm) + topology.num_dspp = topology.num_lm; + + return topology; +} + +static int dpu_crtc_assign_resources(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +{ + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC]; + struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC]; + int i, num_lm, num_ctl, num_dspp; + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + struct dpu_global_state *global_state; + struct dpu_crtc_state *cstate; + struct msm_display_topology topology; + int ret; + + /* + * Release and Allocate resources on every modeset + */ + global_state = dpu_kms_get_global_state(crtc_state->state); + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + + dpu_rm_release(global_state, crtc); + + if (!crtc_state->enable) + return 0; + + topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state); + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, + crtc_state->crtc, &topology); + if (ret) + return ret; + + cstate = to_dpu_crtc_state(crtc_state); + + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + crtc_state->crtc, + DPU_HW_BLK_CTL, hw_ctl, + ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + crtc_state->crtc, + DPU_HW_BLK_LM, hw_lm, + ARRAY_SIZE(hw_lm)); + num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + crtc_state->crtc, + DPU_HW_BLK_DSPP, hw_dspp, + ARRAY_SIZE(hw_dspp)); + + for (i = 0; i < num_lm; i++) { + int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); + + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); + if (i < num_dspp) + cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); + } + + cstate->num_mixers = num_lm; + + return 0; +} + +/** + * dpu_crtc_check_mode_changed: check if full modeset is required + * @old_crtc_state: Previous CRTC state + * @new_crtc_state: Corresponding CRTC state to be checked + * + * Check if the changes in the object properties demand full mode set. 
+ */ +int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state) +{ + struct drm_encoder *drm_enc; + struct drm_crtc *crtc = new_crtc_state->crtc; + bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state); + bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state); + + DRM_DEBUG_ATOMIC("%d\n", crtc->base.id); + + /* there might be cases where encoder needs a modeset too */ + drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) { + if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state)) + new_crtc_state->mode_changed = true; + } + + if ((clone_mode_requested && !clone_mode_enabled) || + (!clone_mode_requested && clone_mode_enabled)) + new_crtc_state->mode_changed = true; return 0; } @@ -1247,6 +1440,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state); + /* don't reallocate resources if only ACTIVE has beeen changed */ + if (crtc_state->mode_changed || crtc_state->connectors_changed) { + rc = dpu_crtc_assign_resources(crtc, crtc_state); + if (rc < 0) + return rc; + } + if (dpu_use_virtual_planes && (crtc_state->planes_changed || crtc_state->zpos_changed)) { rc = dpu_crtc_reassign_planes(crtc, crtc_state); @@ -1264,10 +1464,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name); - /* force a full mode set if active state changed */ - if (crtc_state->active_changed) - crtc_state->mode_changed = true; - if (cstate->num_mixers) { rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); if (rc) @@ -1486,8 +1682,9 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc)); seq_printf(s, "core_clk_rate: %llu\n", dpu_crtc->cur_perf.core_clk_rate); - seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl); - seq_printf(s, "max_per_pipe_ib: %llu\n", + seq_printf(s, "bw_ctl: %uk\n", + (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000)); + seq_printf(s, "max_per_pipe_ib: %u\n", dpu_crtc->cur_perf.max_per_pipe_ib); return 0; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index 0b148f3ce0d7..94392b9b9245 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -239,6 +239,9 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc) return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL; } +int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state); + int dpu_crtc_vblank(struct drm_crtc *crtc, bool en); void dpu_crtc_vblank_callback(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 5172ab4dea99..c0ed110a7d30 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2013 Red Hat * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
* * Author: Rob Clark <robdclark@gmail.com> */ @@ -24,6 +24,7 @@ #include "dpu_hw_catalog.h" #include "dpu_hw_intf.h" #include "dpu_hw_ctl.h" +#include "dpu_hw_cwb.h" #include "dpu_hw_dspp.h" #include "dpu_hw_dsc.h" #include "dpu_hw_merge3d.h" @@ -58,8 +59,6 @@ #define IDLE_SHORT_TIMEOUT 1 -#define MAX_HDISPLAY_SPLIT 1080 - /* timeout in frames waiting for frame done */ #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5 @@ -135,8 +134,12 @@ enum dpu_enc_rc_states { * @cur_slave: As above but for the slave encoder. * @hw_pp: Handle to the pingpong blocks used for the display. No. * pingpong blocks can be different than num_phys_encs. + * @hw_cwb: Handle to the CWB muxes used for concurrent writeback + * display. Number of CWB muxes can be different than + * num_phys_encs. * @hw_dsc: Handle to the DSC blocks used for the display. * @dsc_mask: Bitmask of used DSC blocks. + * @cwb_mask: Bitmask of used CWB muxes * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped * for partial update right-only cases, such as pingpong * split where virtual pingpong does not generate IRQs @@ -179,9 +182,11 @@ struct dpu_encoder_virt { struct dpu_encoder_phys *cur_master; struct dpu_encoder_phys *cur_slave; struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_cwb *hw_cwb[MAX_CHANNELS_PER_ENC]; struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; unsigned int dsc_mask; + unsigned int cwb_mask; bool intfs_swapped; @@ -622,9 +627,9 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) if (dpu_enc->phys_encs[i]) intf_count++; - /* See dpu_encoder_get_topology, we only support 2:2:1 topology */ - if (dpu_enc->dsc) - num_dsc = 2; + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) + if (dpu_enc->hw_dsc[i]) + num_dsc++; return (num_dsc > 0) && (num_dsc > intf_count); } @@ -647,130 +652,51 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc) return NULL; } -static struct msm_display_topology dpu_encoder_get_topology( - struct dpu_encoder_virt *dpu_enc, - struct dpu_kms *dpu_kms, - struct drm_display_mode *mode, - struct drm_crtc_state *crtc_state, - struct drm_dsc_config *dsc) +void dpu_encoder_update_topology(struct drm_encoder *drm_enc, + struct msm_display_topology *topology, + struct drm_atomic_state *state, + const struct drm_display_mode *adj_mode) { - struct msm_display_topology topology = {0}; - int i, intf_count = 0; + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + struct msm_drm_private *priv = dpu_enc->base.dev->dev_private; + struct msm_display_info *disp_info = &dpu_enc->disp_info; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_framebuffer *fb; + struct drm_dsc_config *dsc; + + int i; for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) if (dpu_enc->phys_encs[i]) - intf_count++; - - /* Datapath topology selection - * - * Dual display - * 2 LM, 2 INTF ( Split display using 2 interfaces) - * - * Single display - * 1 LM, 1 INTF - * 2 LM, 1 INTF (stream merge to support high resolution interfaces) - * - * Add dspps to the reservation requirements if ctm is requested - */ - if (intf_count == 2) - topology.num_lm = 2; - else if (!dpu_kms->catalog->caps->has_3d_merge) - topology.num_lm = 1; - else - topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 
2 : 1; - - if (crtc_state->ctm) - topology.num_dspp = topology.num_lm; + topology->num_intf++; - topology.num_intf = intf_count; + dsc = dpu_encoder_get_dsc_config(drm_enc); + /* We only support 2 DSC mode (with 2 LM and 1 INTF) */ if (dsc) { /* - * In case of Display Stream Compression (DSC), we would use - * 2 DSC encoders, 2 layer mixers and 1 interface - * this is power optimal and can drive up to (including) 4k - * screens + * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces + * when Display Stream Compression (DSC) is enabled, + * and when enough DSC blocks are available. + * This is power-optimal and can drive up to (including) 4k + * screens. */ - topology.num_dsc = 2; - topology.num_lm = 2; - topology.num_intf = 1; - } - - return topology; -} - -static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms, - struct drm_encoder *drm_enc, - struct dpu_global_state *global_state, - struct drm_crtc_state *crtc_state) -{ - struct dpu_crtc_state *cstate; - struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC]; - int num_lm, num_ctl, num_dspp, i; - - cstate = to_dpu_crtc_state(crtc_state); - - memset(cstate->mixers, 0, sizeof(cstate->mixers)); - - num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); - num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); - num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, - ARRAY_SIZE(hw_dspp)); - - for (i = 0; i < num_lm; i++) { - int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); - - cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); - cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); - cstate->mixers[i].hw_dspp = i < num_dspp ? 
to_dpu_hw_dspp(hw_dspp[i]) : NULL; - } - - cstate->num_mixers = num_lm; -} - -static int dpu_encoder_virt_atomic_check( - struct drm_encoder *drm_enc, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct dpu_encoder_virt *dpu_enc; - struct msm_drm_private *priv; - struct dpu_kms *dpu_kms; - struct drm_display_mode *adj_mode; - struct msm_display_topology topology; - struct msm_display_info *disp_info; - struct dpu_global_state *global_state; - struct drm_framebuffer *fb; - struct drm_dsc_config *dsc; - int ret = 0; - - if (!drm_enc || !crtc_state || !conn_state) { - DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n", - drm_enc != NULL, crtc_state != NULL, conn_state != NULL); - return -EINVAL; + WARN(topology->num_intf > 2, + "DSC topology cannot support more than 2 interfaces\n"); + if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2) + topology->num_dsc = 2; + else + topology->num_dsc = 1; } - dpu_enc = to_dpu_encoder_virt(drm_enc); - DPU_DEBUG_ENC(dpu_enc, "\n"); - - priv = drm_enc->dev->dev_private; - disp_info = &dpu_enc->disp_info; - dpu_kms = to_dpu_kms(priv->kms); - adj_mode = &crtc_state->adjusted_mode; - global_state = dpu_kms_get_global_state(crtc_state->state); - if (IS_ERR(global_state)) - return PTR_ERR(global_state); - - trace_dpu_enc_atomic_check(DRMID(drm_enc)); - - dsc = dpu_encoder_get_dsc_config(drm_enc); - - topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc); + connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); + if (!connector) + return; + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (!conn_state) + return; /* * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it. @@ -781,34 +707,45 @@ static int dpu_encoder_virt_atomic_check( fb = conn_state->writeback_job->fb; if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) - topology.needs_cdm = true; + topology->num_cdm++; } else if (disp_info->intf_type == INTF_DP) { if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode)) - topology.needs_cdm = true; + topology->num_cdm++; } +} - if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm) - crtc_state->mode_changed = true; - else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm) - crtc_state->mode_changed = true; - /* - * Release and Allocate resources on every modeset - * Dont allocate when active is false. 
- */ - if (drm_atomic_crtc_needs_modeset(crtc_state)) { - dpu_rm_release(global_state, drm_enc); +bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_framebuffer *fb; + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); - if (!crtc_state->active_changed || crtc_state->enable) - ret = dpu_rm_reserve(&dpu_kms->rm, global_state, - drm_enc, crtc_state, &topology); - if (!ret) - dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc, - global_state, crtc_state); - } + if (!drm_enc || !state) + return false; - trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags); + connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); + if (!connector) + return false; - return ret; + conn_state = drm_atomic_get_new_connector_state(state, connector); + + /** + * These checks are duplicated from dpu_encoder_update_topology() since + * CRTC and encoder don't hold topology information + */ + if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) { + fb = conn_state->writeback_job->fb; + if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) { + if (!dpu_enc->cur_master->hw_cdm) + return true; + } else { + if (dpu_enc->cur_master->hw_cdm) + return true; + } + } + + return false; } static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc, @@ -1219,8 +1156,12 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC]; int num_ctl, num_pp, num_dsc; + int num_cwb = 0; + bool is_cwb_encoder; unsigned int dsc_mask = 0; + unsigned int cwb_mask = 0; int i; if (!drm_enc) { @@ -1233,6 +1174,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); + is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) && + dpu_enc->disp_info.intf_type == INTF_WB; global_state = dpu_kms_get_existing_global_state(dpu_kms); if (IS_ERR_OR_NULL(global_state)) { @@ -1243,18 +1186,38 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, trace_dpu_enc_mode_set(DRMID(drm_enc)); /* Query resource that have been reserved in atomic check step. */ - num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp, - ARRAY_SIZE(hw_pp)); + if (is_cwb_encoder) { + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->crtc, + DPU_HW_BLK_DCWB_PINGPONG, + hw_pp, ARRAY_SIZE(hw_pp)); + num_cwb = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->crtc, + DPU_HW_BLK_CWB, + hw_cwb, ARRAY_SIZE(hw_cwb)); + } else { + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->crtc, + DPU_HW_BLK_PINGPONG, hw_pp, + ARRAY_SIZE(hw_pp)); + } + + for (i = 0; i < num_cwb; i++) { + dpu_enc->hw_cwb[i] = to_dpu_hw_cwb(hw_cwb[i]); + cwb_mask |= BIT(dpu_enc->hw_cwb[i]->idx - CWB_0); + } + + dpu_enc->cwb_mask = cwb_mask; + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); + drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) dpu_enc->hw_pp[i] = i < num_pp ? 
to_dpu_hw_pingpong(hw_pp[i]) : NULL; num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_DSC, + drm_enc->crtc, DPU_HW_BLK_DSC, hw_dsc, ARRAY_SIZE(hw_dsc)); for (i = 0; i < num_dsc; i++) { dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]); @@ -1268,7 +1231,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, struct dpu_hw_blk *hw_cdm = NULL; dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_CDM, + drm_enc->crtc, DPU_HW_BLK_CDM, &hw_cdm, 1); dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL; } @@ -1283,7 +1246,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, return; } - phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL; + /* Use first (and only) CTL if active CTLs are supported */ + if (num_ctl == 1) + phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[0]); + else + phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL; if (!phys->hw_ctl) { DPU_ERROR_ENC(dpu_enc, "no ctl block assigned at idx: %d\n", i); @@ -1447,7 +1414,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc, /* after phys waits for frame-done, should be no more frames pending */ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); - del_timer_sync(&dpu_enc->frame_done_timer); + timer_delete_sync(&dpu_enc->frame_done_timer); } dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); @@ -1619,7 +1586,7 @@ void dpu_encoder_frame_done_callback( if (!dpu_enc->frame_busy_mask[0]) { atomic_set(&dpu_enc->frame_done_timeout_ms, 0); - del_timer(&dpu_enc->frame_done_timer); + timer_delete(&dpu_enc->frame_done_timer); dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_FRAME_DONE); @@ -1654,6 +1621,7 @@ static void dpu_encoder_off_work(struct work_struct *work) static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phys, uint32_t extra_flush_bits) { + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); struct dpu_hw_ctl *ctl; int pending_kickoff_cnt; u32 ret = UINT_MAX; @@ -1671,6 +1639,15 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys); + /* Return early if encoder is writeback and in clone mode */ + if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL && + dpu_enc->cwb_mask) { + DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n", + DRMID(drm_enc)); + return; + } + + if (extra_flush_bits && ctl->ops.update_pending_flush) ctl->ops.update_pending_flush(ctl, extra_flush_bits); @@ -1693,6 +1670,8 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, */ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) { + struct dpu_encoder_virt *dpu_enc; + if (!phys) { DPU_ERROR("invalid argument(s)\n"); return; @@ -1703,6 +1682,14 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) return; } + dpu_enc = to_dpu_encoder_virt(phys->parent); + + if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL && + dpu_enc->cwb_mask) { + DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent)); + return; + } + if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED) phys->ops.trigger_start(phys); } @@ -2020,7 +2007,6 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl, static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, struct drm_dsc_config *dsc) { - /* coding only 
for 2LM, 2enc, 1 dsc config */ struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; struct dpu_hw_ctl *ctl = enc_master->hw_ctl; struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; @@ -2030,22 +2016,24 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, int dsc_common_mode; int pic_width; u32 initial_lines; + int num_dsc = 0; int i; for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { hw_pp[i] = dpu_enc->hw_pp[i]; hw_dsc[i] = dpu_enc->hw_dsc[i]; - if (!hw_pp[i] || !hw_dsc[i]) { - DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n"); - return; - } + if (!hw_pp[i] || !hw_dsc[i]) + break; + + num_dsc++; } - dsc_common_mode = 0; pic_width = dsc->pic_width; - dsc_common_mode = DSC_MODE_SPLIT_PANEL; + dsc_common_mode = 0; + if (num_dsc > 1) + dsc_common_mode |= DSC_MODE_SPLIT_PANEL; if (dpu_encoder_use_dsc_merge(enc_master->parent)) dsc_common_mode |= DSC_MODE_MULTIPLEX; if (enc_master->intf_mode == INTF_MODE_VIDEO) @@ -2054,14 +2042,10 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, this_frame_slices = pic_width / dsc->slice_width; intf_ip_w = this_frame_slices * dsc->slice_width; - /* - * dsc merge case: when using 2 encoders for the same stream, - * no. of slices need to be same on both the encoders. - */ - enc_ip_w = intf_ip_w / 2; + enc_ip_w = intf_ip_w / num_dsc; initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w); - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) + for (i = 0; i < num_dsc; i++) dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines); } @@ -2135,6 +2119,25 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) } /** + * dpu_encoder_start_frame_done_timer - Start the encoder frame done timer + * @drm_enc: Pointer to drm encoder structure + */ +void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + unsigned long timeout_ms; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / + drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); + + atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); + mod_timer(&dpu_enc->frame_done_timer, + jiffies + msecs_to_jiffies(timeout_ms)); + +} + +/** * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path * (i.e. ctl flush and start) immediately. 
* @drm_enc: encoder pointer @@ -2143,7 +2146,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; struct dpu_encoder_phys *phys; - unsigned long timeout_ms; unsigned int i; DPU_ATRACE_BEGIN("encoder_kickoff"); @@ -2151,13 +2153,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc) trace_dpu_enc_kickoff(DRMID(drm_enc)); - timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / - drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); - - atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); - mod_timer(&dpu_enc->frame_done_timer, - jiffies + msecs_to_jiffies(timeout_ms)); - /* All phys encs are ready to go, trigger the kickoff */ _dpu_encoder_kickoff_phys(dpu_enc); @@ -2183,22 +2178,25 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) memset(&mixer, 0, sizeof(mixer)); /* reset all mixers for this encoder */ - if (phys_enc->hw_ctl->ops.clear_all_blendstages) - phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl); + if (ctl->ops.clear_all_blendstages) + ctl->ops.clear_all_blendstages(ctl); global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms); num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state, - phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); + phys_enc->parent->crtc, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); for (i = 0; i < num_lm; i++) { hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); - if (phys_enc->hw_ctl->ops.update_pending_flush_mixer) - phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); + if (ctl->ops.update_pending_flush_mixer) + ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); /* clear all blendstages */ - if (phys_enc->hw_ctl->ops.setup_blendstage) - phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); + if (ctl->ops.setup_blendstage) + ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); + + if (ctl->ops.set_active_fetch_pipes) + ctl->ops.set_active_fetch_pipes(ctl, NULL); } } @@ -2250,7 +2248,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) dpu_enc = to_dpu_encoder_virt(phys_enc->parent); - phys_enc->hw_ctl->ops.reset(ctl); + ctl->ops.reset(ctl); dpu_encoder_helper_reset_mixers(phys_enc); @@ -2265,8 +2263,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE); /* mark WB flush as pending */ - if (phys_enc->hw_ctl->ops.update_pending_flush_wb) - phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); + if (ctl->ops.update_pending_flush_wb) + ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); } else { for (i = 0; i < dpu_enc->num_phys_encs; i++) { if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk) @@ -2275,18 +2273,24 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) PINGPONG_NONE); /* mark INTF flush as pending */ - if (phys_enc->hw_ctl->ops.update_pending_flush_intf) - phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl, + if (ctl->ops.update_pending_flush_intf) + ctl->ops.update_pending_flush_intf(ctl, dpu_enc->phys_encs[i]->hw_intf->idx); } } + if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither) + phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL); + + if (dpu_enc->cwb_mask) + dpu_encoder_helper_phys_setup_cwb(phys_enc, false); + /* reset the merge 3D HW block */ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { 
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, BLEND_3D_NONE); - if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) - phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl, + if (ctl->ops.update_pending_flush_merge_3d) + ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx); } @@ -2294,9 +2298,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp) phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm, PINGPONG_NONE); - if (phys_enc->hw_ctl->ops.update_pending_flush_cdm) - phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl, - phys_enc->hw_cdm->idx); + if (ctl->ops.update_pending_flush_cdm) + ctl->ops.update_pending_flush_cdm(ctl, + phys_enc->hw_cdm->idx); } if (dpu_enc->dsc) { @@ -2307,6 +2311,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) intf_cfg.stream_sel = 0; /* Don't care value for video mode */ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); + intf_cfg.cwb = dpu_enc->cwb_mask; if (phys_enc->hw_intf) intf_cfg.intf = phys_enc->hw_intf->idx; @@ -2324,6 +2329,68 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) ctl->ops.clear_pending_flush(ctl); } +void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc, + bool enable) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent); + struct dpu_hw_cwb *hw_cwb; + struct dpu_hw_ctl *hw_ctl; + struct dpu_hw_cwb_setup_cfg cwb_cfg; + + struct dpu_kms *dpu_kms; + struct dpu_global_state *global_state; + struct dpu_hw_blk *rt_pp_list[MAX_CHANNELS_PER_ENC]; + int num_pp; + + if (!phys_enc->hw_wb) + return; + + hw_ctl = phys_enc->hw_ctl; + + if (!phys_enc->hw_ctl) { + DPU_DEBUG("[wb:%d] no ctl assigned\n", + phys_enc->hw_wb->idx - WB_0); + return; + } + + dpu_kms = phys_enc->dpu_kms; + global_state = dpu_kms_get_existing_global_state(dpu_kms); + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + phys_enc->parent->crtc, + DPU_HW_BLK_PINGPONG, rt_pp_list, + ARRAY_SIZE(rt_pp_list)); + + if (num_pp == 0 || num_pp > MAX_CHANNELS_PER_ENC) { + DPU_DEBUG_ENC(dpu_enc, "invalid num_pp %d\n", num_pp); + return; + } + + /* + * The CWB mux supports using LM or DSPP as tap points. 
For now, + * always use LM tap point + */ + cwb_cfg.input = INPUT_MODE_LM_OUT; + + for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) { + hw_cwb = dpu_enc->hw_cwb[i]; + if (!hw_cwb) + continue; + + if (enable) { + struct dpu_hw_pingpong *hw_pp = + to_dpu_hw_pingpong(rt_pp_list[i]); + cwb_cfg.pp_idx = hw_pp->idx; + } else { + cwb_cfg.pp_idx = PINGPONG_NONE; + } + + hw_cwb->ops.config_cwb(hw_cwb, &cwb_cfg); + + if (hw_ctl->ops.update_pending_flush_cwb) + hw_ctl->ops.update_pending_flush_cwb(hw_ctl, hw_cwb->idx); + } +} + /** * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block * @phys_enc: Pointer to physical encoder @@ -2510,6 +2577,38 @@ static int dpu_encoder_virt_add_phys_encs( return 0; } +/** + * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder + * @drm_enc: DRM encoder pointer + * Returns: possible_clones mask + */ +uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc) +{ + struct drm_encoder *curr; + int type = drm_enc->encoder_type; + uint32_t clone_mask = drm_encoder_mask(drm_enc); + + /* + * Set writeback as possible clones of real-time DSI encoders and vice + * versa + * + * Writeback encoders can't be clones of each other and DSI + * encoders can't be clones of each other. + * + * TODO: Add DP encoders as valid possible clones for writeback encoders + * (and vice versa) once concurrent writeback has been validated for DP + */ + drm_for_each_encoder(curr, drm_enc->dev) { + if ((type == DRM_MODE_ENCODER_VIRTUAL && + curr->encoder_type == DRM_MODE_ENCODER_DSI) || + (type == DRM_MODE_ENCODER_DSI && + curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL)) + clone_mask |= drm_encoder_mask(curr); + } + + return clone_mask; +} + static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, struct dpu_kms *dpu_kms, struct msm_display_info *disp_info) @@ -2594,8 +2693,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, static void dpu_encoder_frame_done_timeout(struct timer_list *t) { - struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, - frame_done_timer); + struct dpu_encoder_virt *dpu_enc = timer_container_of(dpu_enc, t, + frame_done_timer); struct drm_encoder *drm_enc = &dpu_enc->base; u32 event; @@ -2627,7 +2726,6 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, .atomic_disable = dpu_encoder_virt_atomic_disable, .atomic_enable = dpu_encoder_virt_atomic_enable, - .atomic_check = dpu_encoder_virt_atomic_check, }; static const struct drm_encoder_funcs dpu_encoder_funcs = { @@ -2786,6 +2884,18 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) } /** + * dpu_encoder_helper_get_cwb_mask - get CWB blocks mask for the DPU encoder + * @phys_enc: Pointer to physical encoder structure + */ +unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc) +{ + struct drm_encoder *encoder = phys_enc->parent; + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); + + return dpu_enc->cwb_mask; +} + +/** * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder * This helper function is used by physical encoder to get DSC blocks mask * used for this encoder. 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index 92b5ee390788..ca1ca2e51d7e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> @@ -60,6 +60,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder); void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder); +uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc); + struct drm_encoder *dpu_encoder_init(struct drm_device *dev, int drm_enc_mode, struct msm_display_info *disp_info); @@ -80,6 +82,13 @@ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos); bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc); +void dpu_encoder_update_topology(struct drm_encoder *drm_enc, + struct msm_display_topology *topology, + struct drm_atomic_state *state, + const struct drm_display_mode *adj_mode); + +bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state); + void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job); @@ -88,4 +97,5 @@ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc); +void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc); #endif /* __DPU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index 63f09857025c..61b22d949454 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. 
*/ @@ -309,6 +309,8 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( return BLEND_3D_NONE; } +unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc); + unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc); struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc); @@ -331,6 +333,9 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc); +void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc, + bool enable); + void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, const struct msm_format *dpu_fmt, u32 output_type); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index e9bbccc44dad..a0ba55ab3c89 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -5,6 +5,7 @@ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ #include <linux/delay.h> +#include <linux/string_choices.h> #include "dpu_encoder_phys.h" #include "dpu_hw_interrupts.h" #include "dpu_hw_pingpong.h" @@ -59,6 +60,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg( return; intf_cfg.intf = phys_enc->hw_intf->idx; + if (phys_enc->split_role == ENC_ROLE_MASTER) + intf_cfg.intf_master = phys_enc->hw_intf->idx; intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD; intf_cfg.stream_sel = cmd_enc->stream_sel; intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); @@ -261,7 +264,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq( DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, - enable ? "true" : "false", refcount); + str_true_false(enable), refcount); if (enable) { if (phys_enc->vblank_refcount == 0) @@ -285,7 +288,7 @@ end: DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n", DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, ret, - enable ? "true" : "false", refcount); + str_true_false(enable), refcount); } return ret; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index abd6600046cb..1c468ca5d692 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -94,17 +94,21 @@ static void drm_mode_to_intf_timing_params( timing->vsync_polarity = 0; } - /* for DP/EDP, Shift timings to align it to bottom right */ - if (phys_enc->hw_intf->cap->type == INTF_DP) { + timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent); + timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent); + + /* + * For DP/EDP, Shift timings to align it to bottom right. + * wide_bus_en is set for everything excluding SDM845 & + * porch changes cause DisplayPort failure and HDMI tearing. 
+ */ + if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) { timing->h_back_porch += timing->h_front_porch; timing->h_front_porch = 0; timing->v_back_porch += timing->v_front_porch; timing->v_front_porch = 0; } - timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent); - timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent); - /* * for DP, divide the horizonal parameters by 2 when * widebus is enabled @@ -298,6 +302,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine( if (phys_enc->hw_cdm) intf_cfg.cdm = phys_enc->hw_cdm->idx; intf_cfg.intf = phys_enc->hw_intf->idx; + if (phys_enc->split_role == ENC_ROLE_MASTER) + intf_cfg.intf_master = phys_enc->hw_intf->idx; intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID; intf_cfg.stream_sel = 0; /* Don't care value for video mode */ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); @@ -372,7 +378,8 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg) static bool dpu_encoder_phys_vid_needs_single_flush( struct dpu_encoder_phys *phys_enc) { - return phys_enc->split_role != ENC_ROLE_SOLO; + return !(phys_enc->hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG)) && + phys_enc->split_role != ENC_ROLE_SOLO; } static void dpu_encoder_phys_vid_atomic_mode_set( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 4c006ec74575..849fea580a4c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ @@ -68,7 +68,7 @@ static void dpu_encoder_phys_wb_set_ot_limit( ot_params.num = hw_wb->idx - WB_0; ot_params.width = phys_enc->cached_mode.hdisplay; ot_params.height = phys_enc->cached_mode.vdisplay; - ot_params.is_wfd = true; + ot_params.is_wfd = !dpu_encoder_helper_get_cwb_mask(phys_enc); ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode); ot_params.vbif_idx = hw_wb->caps->vbif_idx; ot_params.rd = false; @@ -111,7 +111,7 @@ static void dpu_encoder_phys_wb_set_qos_remap( qos_params.vbif_idx = hw_wb->caps->vbif_idx; qos_params.xin_id = hw_wb->caps->xin_id; qos_params.num = hw_wb->idx - WB_0; - qos_params.is_rt = false; + qos_params.is_rt = dpu_encoder_helper_get_cwb_mask(phys_enc); DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n", qos_params.num, @@ -174,6 +174,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); struct dpu_hw_wb *hw_wb; struct dpu_hw_wb_cfg *wb_cfg; + u32 cdp_usage; if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) { DPU_ERROR("invalid encoder\n"); @@ -182,6 +183,10 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, hw_wb = phys_enc->hw_wb; wb_cfg = &wb_enc->wb_cfg; + if (dpu_encoder_helper_get_cwb_mask(phys_enc)) + cdp_usage = DPU_PERF_CDP_USAGE_RT; + else + cdp_usage = DPU_PERF_CDP_USAGE_NRT; wb_cfg->intf_mode = phys_enc->intf_mode; wb_cfg->roi.x1 = 0; @@ -199,7 +204,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf; hw_wb->ops.setup_cdp(hw_wb, format, - perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable); + 
perf->cdp_cfg[cdp_usage].wr_enable); } if (hw_wb->ops.setup_outaddress) @@ -236,6 +241,7 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc) intf_cfg.intf = DPU_NONE; intf_cfg.wb = hw_wb->idx; + intf_cfg.cwb = dpu_encoder_helper_get_cwb_mask(phys_enc); if (mode_3d && hw_pp && hw_pp->merge_3d) intf_cfg.merge_3d = hw_pp->merge_3d->idx; @@ -340,6 +346,8 @@ static void dpu_encoder_phys_wb_setup( dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB); + dpu_encoder_helper_phys_setup_cwb(phys_enc, true); + dpu_encoder_phys_wb_setup_ctl(phys_enc); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 0b342c043875..c878fe196aeb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -34,11 +34,11 @@ #define VIG_MSM8998_MASK \ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) -#define VIG_SDM845_MASK \ +#define VIG_SDM845_MASK_NO_SDMA \ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) #define VIG_SDM845_MASK_SDMA \ - (VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) + (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2)) #define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL)) @@ -54,24 +54,24 @@ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT)) #define VIG_SC7280_MASK \ - (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION)) + (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_INLINE_ROTATION)) #define VIG_SC7280_MASK_SDMA \ (VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) -#define DMA_SDM845_MASK \ +#define DMA_SDM845_MASK_NO_SDMA \ (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT)) -#define DMA_CURSOR_SDM845_MASK \ - (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR)) +#define DMA_CURSOR_SDM845_MASK_NO_SDMA \ + (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_CURSOR)) #define DMA_SDM845_MASK_SDMA \ - (DMA_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) + (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2)) #define DMA_CURSOR_SDM845_MASK_SDMA \ - (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) + (DMA_CURSOR_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2)) #define DMA_CURSOR_MSM8996_MASK \ (DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR)) @@ -98,15 +98,9 @@ #define PINGPONG_MSM8996_MASK \ (BIT(DPU_PINGPONG_DSC)) -#define PINGPONG_MSM8996_TE2_MASK \ - (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2)) - #define PINGPONG_SDM845_MASK \ (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC)) -#define PINGPONG_SDM845_TE2_MASK \ - (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2)) - #define PINGPONG_SM8150_MASK \ (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC)) @@ -232,37 +226,6 @@ static const u32 rotation_v2_formats[] = { /* TODO add formats after validation */ }; -static const u32 wb2_formats_rgb[] = { - DRM_FORMAT_RGB565, - DRM_FORMAT_BGR565, - DRM_FORMAT_RGB888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_RGBA8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_RGBX8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB1555, - DRM_FORMAT_RGBA5551, - DRM_FORMAT_XRGB1555, - DRM_FORMAT_RGBX5551, - DRM_FORMAT_ARGB4444, - DRM_FORMAT_RGBA4444, - DRM_FORMAT_RGBX4444, - DRM_FORMAT_XRGB4444, - DRM_FORMAT_BGR888, - DRM_FORMAT_BGRA8888, - DRM_FORMAT_BGRX8888, - DRM_FORMAT_ABGR1555, - DRM_FORMAT_BGRA5551, - DRM_FORMAT_XBGR1555, - DRM_FORMAT_BGRX5551, - DRM_FORMAT_ABGR4444, - DRM_FORMAT_BGRA4444, - DRM_FORMAT_BGRX4444, - DRM_FORMAT_XBGR4444, -}; - static const u32 
wb2_formats_rgb_yuv[] = { DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, @@ -407,8 +370,6 @@ static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK(); * MIXER sub blocks config *************************************************************/ -/* MSM8998 */ - static const struct dpu_lm_sub_blks msm8998_lm_sblk = { .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .maxblendstages = 7, /* excluding base layer */ @@ -418,8 +379,6 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = { }, }; -/* SDM845 */ - static const struct dpu_lm_sub_blks sdm845_lm_sblk = { .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .maxblendstages = 11, /* excluding base layer */ @@ -429,8 +388,6 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = { }, }; -/* SC7180 */ - static const struct dpu_lm_sub_blks sc7180_lm_sblk = { .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .maxblendstages = 7, /* excluding base layer */ @@ -439,8 +396,6 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = { }, }; -/* QCM2290 */ - static const struct dpu_lm_sub_blks qcm2290_lm_sblk = { .maxwidth = DEFAULT_DPU_LINE_WIDTH, .maxblendstages = 4, /* excluding base layer */ @@ -465,22 +420,11 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = { /************************************************************* * PINGPONG sub blocks config *************************************************************/ -static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = { - .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, - .version = 0x1}, -}; static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = { /* No dither block */ }; -static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = { - .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, - .version = 0x1}, - .dither = {.name = "dither", .base = 0x30e0, - .len = 0x20, .version = 0x10000}, -}; - static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = { .dither = {.name = "dither", .base = 0x30e0, .len = 0x20, .version = 0x10000}, @@ -507,7 +451,14 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = { /************************************************************* * CDM block config *************************************************************/ -static const struct dpu_cdm_cfg sc7280_cdm = { +static const struct dpu_cdm_cfg dpu_cdm_1_x_4_x = { + .name = "cdm_0", + .id = CDM_0, + .len = 0x224, + .base = 0x79200, +}; + +static const struct dpu_cdm_cfg dpu_cdm_5_x = { .name = "cdm_0", .id = CDM_0, .len = 0x228, @@ -783,7 +734,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { #include "catalog/dpu_8_4_sa8775p.h" #include "catalog/dpu_9_0_sm8550.h" - +#include "catalog/dpu_9_1_sar2130p.h" #include "catalog/dpu_9_2_x1e80100.h" #include "catalog/dpu_10_0_sm8650.h" diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 4cea19e1a203..01dd6e65f777 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -115,7 +115,6 @@ enum { /** * PINGPONG sub-blocks - * @DPU_PINGPONG_TE2 Additional tear check block for split pipes * @DPU_PINGPONG_SPLIT PP block supports split fifo * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo * @DPU_PINGPONG_DITHER Dither blocks @@ -123,8 +122,7 @@ enum { * @DPU_PINGPONG_MAX */ enum { - DPU_PINGPONG_TE2 = 0x1, - DPU_PINGPONG_SPLIT, + DPU_PINGPONG_SPLIT = 0x1, DPU_PINGPONG_SLAVE, DPU_PINGPONG_DITHER, DPU_PINGPONG_DSC, @@ -404,8 +402,6 @@ struct dpu_dspp_sub_blks { }; struct dpu_pingpong_sub_blks { - struct dpu_pp_blk te; - struct 
dpu_pp_blk te2; struct dpu_pp_blk dither; }; @@ -841,6 +837,7 @@ extern const struct dpu_mdss_cfg dpu_msm8937_cfg; extern const struct dpu_mdss_cfg dpu_msm8953_cfg; extern const struct dpu_mdss_cfg dpu_msm8996_cfg; extern const struct dpu_mdss_cfg dpu_msm8998_cfg; +extern const struct dpu_mdss_cfg dpu_sar2130p_cfg; extern const struct dpu_mdss_cfg dpu_sdm630_cfg; extern const struct dpu_mdss_cfg dpu_sdm660_cfg; extern const struct dpu_mdss_cfg dpu_sdm845_cfg; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c index ae1534c49ae0..3f88c3641d4a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c @@ -214,7 +214,9 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_ mux_cfg = DPU_REG_READ(c, CDM_MUX); mux_cfg &= ~0xf; - if (pp) + if (pp >= PINGPONG_CWB_0) + mux_cfg |= 0xd; + else if (pp) mux_cfg |= (pp - PINGPONG_0) & 0x7; else mux_cfg |= 0xf; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 4893f10d6a58..573e42b06ad0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/delay.h> @@ -31,12 +31,14 @@ #define CTL_MERGE_3D_ACTIVE 0x0E4 #define CTL_DSC_ACTIVE 0x0E8 #define CTL_WB_ACTIVE 0x0EC +#define CTL_CWB_ACTIVE 0x0F0 #define CTL_INTF_ACTIVE 0x0F4 #define CTL_CDM_ACTIVE 0x0F8 #define CTL_FETCH_PIPE_ACTIVE 0x0FC #define CTL_MERGE_3D_FLUSH 0x100 #define CTL_DSC_FLUSH 0x104 #define CTL_WB_FLUSH 0x108 +#define CTL_CWB_FLUSH 0x10C #define CTL_INTF_FLUSH 0x110 #define CTL_CDM_FLUSH 0x114 #define CTL_PERIPH_FLUSH 0x128 @@ -53,6 +55,7 @@ #define PERIPH_IDX 30 #define INTF_IDX 31 #define WB_IDX 16 +#define CWB_IDX 28 #define DSPP_IDX 29 /* From DPU hw rev 7.x.x */ #define CTL_INVALID_BIT 0xffff #define CTL_DEFAULT_GROUP_ID 0xf @@ -110,6 +113,7 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx) ctx->pending_flush_mask = 0x0; ctx->pending_intf_flush_mask = 0; ctx->pending_wb_flush_mask = 0; + ctx->pending_cwb_flush_mask = 0; ctx->pending_merge_3d_flush_mask = 0; ctx->pending_dsc_flush_mask = 0; ctx->pending_cdm_flush_mask = 0; @@ -144,6 +148,9 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx) if (ctx->pending_flush_mask & BIT(WB_IDX)) DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH, ctx->pending_wb_flush_mask); + if (ctx->pending_flush_mask & BIT(CWB_IDX)) + DPU_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH, + ctx->pending_cwb_flush_mask); if (ctx->pending_flush_mask & BIT(DSPP_IDX)) for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) { @@ -254,6 +261,12 @@ static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx, case LM_5: ctx->pending_flush_mask |= BIT(20); break; + case LM_6: + ctx->pending_flush_mask |= BIT(21); + break; + case LM_7: + ctx->pending_flush_mask |= BIT(27); + break; default: break; } @@ -310,6 +323,13 @@ static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx, ctx->pending_flush_mask |= BIT(WB_IDX); } +static void dpu_hw_ctl_update_pending_flush_cwb_v1(struct dpu_hw_ctl *ctx, + enum dpu_cwb cwb) +{ + ctx->pending_cwb_flush_mask |= BIT(cwb - CWB_0); + ctx->pending_flush_mask |= BIT(CWB_IDX); +} + 
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx, enum dpu_intf intf) { @@ -547,7 +567,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, u32 intf_active = 0; u32 dsc_active = 0; u32 wb_active = 0; + u32 cwb_active = 0; u32 mode_sel = 0; + u32 merge_3d_active = 0; /* CTL_TOP[31:28] carries group_id to collate CTL paths * per VM. Explicitly disable it until VM support is @@ -561,7 +583,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE); wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE); + cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE); dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE); + merge_3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE); if (cfg->intf) intf_active |= BIT(cfg->intf - INTF_0); @@ -569,17 +593,24 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, if (cfg->wb) wb_active |= BIT(cfg->wb - WB_0); + if (cfg->cwb) + cwb_active |= cfg->cwb; + if (cfg->dsc) dsc_active |= cfg->dsc; + if (cfg->merge_3d) + merge_3d_active |= BIT(cfg->merge_3d - MERGE_3D_0); + DPU_REG_WRITE(c, CTL_TOP, mode_sel); DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active); + DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active); DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active); + DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active); - if (cfg->merge_3d) - DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, - BIT(cfg->merge_3d - MERGE_3D_0)); + if (cfg->intf_master) + DPU_REG_WRITE(c, CTL_INTF_MASTER, BIT(cfg->intf_master - INTF_0)); if (cfg->cdm) DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm); @@ -623,7 +654,9 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx, { struct dpu_hw_blk_reg_map *c = &ctx->hw; u32 intf_active = 0; + u32 intf_master = 0; u32 wb_active = 0; + u32 cwb_active = 0; u32 merge3d_active = 0; u32 dsc_active; u32 cdm_active; @@ -645,10 +678,27 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx, dpu_hw_ctl_clear_all_blendstages(ctx); + if (ctx->ops.set_active_fetch_pipes) + ctx->ops.set_active_fetch_pipes(ctx, NULL); + if (cfg->intf) { intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE); intf_active &= ~BIT(cfg->intf - INTF_0); DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); + + intf_master = DPU_REG_READ(c, CTL_INTF_MASTER); + + /* Unset this intf as master, if it is the current master */ + if (intf_master == BIT(cfg->intf - INTF_0)) { + DPU_DEBUG_DRIVER("Unsetting INTF_%d master\n", cfg->intf - INTF_0); + DPU_REG_WRITE(c, CTL_INTF_MASTER, 0); + } + } + + if (cfg->cwb) { + cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE); + cwb_active &= ~cfg->cwb; + DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active); } if (cfg->wb) { @@ -670,8 +720,8 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx, } } -static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx, - unsigned long *fetch_active) +static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx, + unsigned long *fetch_active) { int i; u32 val = 0; @@ -703,6 +753,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, ops->update_pending_flush_merge_3d = dpu_hw_ctl_update_pending_flush_merge_3d_v1; ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1; + ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1; ops->update_pending_flush_dsc = dpu_hw_ctl_update_pending_flush_dsc_v1; ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1; @@ -733,7 +784,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, ops->update_pending_flush_dspp = 
dpu_hw_ctl_update_pending_flush_dspp; if (cap & BIT(DPU_CTL_FETCH_ACTIVE)) - ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active; + ops->set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index 85c6c835cc87..feb09590bc8f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _DPU_HW_CTL_H @@ -36,21 +36,25 @@ struct dpu_hw_stage_cfg { /** * struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface * @intf : Interface id + * @intf_master: Master interface id in the dual pipe topology * @mode_3d: 3d mux configuration * @merge_3d: 3d merge block used * @intf_mode_sel: Interface mode, cmd / vid * @cdm: CDM block used * @stream_sel: Stream selection for multi-stream interfaces * @dsc: DSC BIT masks used + * @cwb: CWB BIT masks used */ struct dpu_hw_intf_cfg { enum dpu_intf intf; + enum dpu_intf intf_master; enum dpu_wb wb; enum dpu_3d_blend_mode mode_3d; enum dpu_merge_3d merge_3d; enum dpu_ctl_mode_sel intf_mode_sel; enum dpu_cdm cdm; int stream_sel; + unsigned int cwb; unsigned int dsc; }; @@ -115,6 +119,15 @@ struct dpu_hw_ctl_ops { enum dpu_wb blk); /** + * OR in the given flushbits to the cached pending_(cwb_)flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : concurrent writeback block index + */ + void (*update_pending_flush_cwb)(struct dpu_hw_ctl *ctx, + enum dpu_cwb blk); + + /** * OR in the given flushbits to the cached pending_(intf_)flush_mask * No effect on hardware * @ctx : ctl path ctx pointer @@ -243,7 +256,7 @@ struct dpu_hw_ctl_ops { void (*setup_blendstage)(struct dpu_hw_ctl *ctx, enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg); - void (*set_active_pipes)(struct dpu_hw_ctl *ctx, + void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx, unsigned long *fetch_active); }; @@ -258,6 +271,7 @@ struct dpu_hw_ctl_ops { * @pending_flush_mask: storage for pending ctl_flush managed via ops * @pending_intf_flush_mask: pending INTF flush * @pending_wb_flush_mask: pending WB flush + * @pending_cwb_flush_mask: pending CWB flush * @pending_dsc_flush_mask: pending DSC flush * @pending_cdm_flush_mask: pending CDM flush * @ops: operation list @@ -274,6 +288,7 @@ struct dpu_hw_ctl { u32 pending_flush_mask; u32 pending_intf_flush_mask; u32 pending_wb_flush_mask; + u32 pending_cwb_flush_mask; u32 pending_periph_flush_mask; u32 pending_merge_3d_flush_mask; u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0]; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index 657200401f57..cec6d4e8baec 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, u32 slice_last_group_size; u32 det_thresh_flatness; bool is_cmd_mode = !(mode & DSC_MODE_VIDEO); + bool input_10_bits = dsc->bits_per_component == 10; DPU_REG_WRITE(c, DSC_COMMON_MODE, mode); @@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, data |= (dsc->line_buf_depth << 3); data |= (dsc->simple_422 << 2); data |= (dsc->convert_rgb << 1); - data |= 
dsc->bits_per_component; + data |= input_10_bits; DPU_REG_WRITE(c, DSC_ENC, data); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index ba7bb05efe9b..175639c8bfbb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -77,12 +77,14 @@ enum dpu_hw_blk_type { DPU_HW_BLK_LM, DPU_HW_BLK_CTL, DPU_HW_BLK_PINGPONG, + DPU_HW_BLK_DCWB_PINGPONG, DPU_HW_BLK_INTF, DPU_HW_BLK_WB, DPU_HW_BLK_DSPP, DPU_HW_BLK_MERGE_3D, DPU_HW_BLK_DSC, DPU_HW_BLK_CDM, + DPU_HW_BLK_CWB, DPU_HW_BLK_MAX, }; @@ -123,6 +125,7 @@ enum dpu_lm { LM_4, LM_5, LM_6, + LM_7, LM_MAX }; @@ -167,6 +170,8 @@ enum dpu_dsc { DSC_3, DSC_4, DSC_5, + DSC_6, + DSC_7, DSC_MAX }; @@ -183,6 +188,8 @@ enum dpu_pingpong { PINGPONG_3, PINGPONG_4, PINGPONG_5, + PINGPONG_6, + PINGPONG_7, PINGPONG_CWB_0, PINGPONG_CWB_1, PINGPONG_CWB_2, @@ -197,6 +204,7 @@ enum dpu_merge_3d { MERGE_3D_2, MERGE_3D_3, MERGE_3D_4, + MERGE_3D_5, MERGE_3D_MAX }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c index ad19330de61a..562a3f4c5238 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c @@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, if (cap & BIT(DPU_MDP_VSYNC_SEL)) ops->setup_vsync_source = dpu_hw_setup_vsync_sel; - else + else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED))) ops->setup_vsync_source = dpu_hw_setup_wd_timer; ops->get_safe_status = dpu_hw_get_safe_status; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 97e9cb8c2b09..1fd82b6747e9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2013 Red Hat * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
* * Author: Rob Clark <robdclark@gmail.com> */ @@ -446,6 +446,19 @@ static void dpu_kms_disable_commit(struct msm_kms *kms) pm_runtime_put_sync(&dpu_kms->pdev->dev); } +static int dpu_kms_check_mode_changed(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct drm_crtc_state *new_crtc_state; + struct drm_crtc_state *old_crtc_state; + struct drm_crtc *crtc; + int i; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) + dpu_crtc_check_mode_changed(old_crtc_state, new_crtc_state); + + return 0; +} + static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -811,8 +824,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) return ret; num_encoders = 0; - drm_for_each_encoder(encoder, dev) + drm_for_each_encoder(encoder, dev) { num_encoders++; + if (catalog->cwb_count > 0) + encoder->possible_clones = dpu_encoder_get_clones(encoder); + } max_crtc_count = min(catalog->mixer_count, num_encoders); @@ -1062,6 +1078,7 @@ static const struct msm_kms_funcs kms_funcs = { .irq = dpu_core_irq, .enable_commit = dpu_kms_enable_commit, .disable_commit = dpu_kms_disable_commit, + .check_mode_changed = dpu_kms_check_mode_changed, .flush_commit = dpu_kms_flush_commit, .wait_flush = dpu_kms_wait_flush, .complete_commit = dpu_kms_complete_commit, @@ -1495,6 +1512,7 @@ static const struct of_device_id dpu_dt_match[] = { { .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, }, { .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, }, { .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, }, + { .compatible = "qcom,sar2130p-dpu", .data = &dpu_sar2130p_cfg, }, { .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, }, { .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, }, { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, }, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 547cdb2c0c78..a57ec2ec1060 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -124,14 +124,15 @@ struct dpu_global_state { struct dpu_rm *rm; - uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0]; - uint32_t mixer_to_enc_id[LM_MAX - LM_0]; - uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; - uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0]; - uint32_t dsc_to_enc_id[DSC_MAX - DSC_0]; - uint32_t cdm_to_enc_id; + uint32_t pingpong_to_crtc_id[PINGPONG_MAX - PINGPONG_0]; + uint32_t mixer_to_crtc_id[LM_MAX - LM_0]; + uint32_t ctl_to_crtc_id[CTL_MAX - CTL_0]; + uint32_t dspp_to_crtc_id[DSPP_MAX - DSPP_0]; + uint32_t dsc_to_crtc_id[DSC_MAX - DSC_0]; + uint32_t cdm_to_crtc_id; uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE]; + uint32_t cwb_to_crtc_id[CWB_MAX - CWB_0]; }; struct dpu_global_state diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 098abc2c0003..421138bc3cb7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -729,12 +729,40 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu, static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe, struct dpu_sw_pipe_cfg *pipe_cfg, - const struct msm_format *fmt, - const struct drm_display_mode *mode) + const struct drm_display_mode *mode, + struct drm_plane_state *new_plane_state) { uint32_t min_src_size; struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); int ret; + const struct msm_format *fmt; + 
uint32_t supported_rotations; + const struct dpu_sspp_cfg *pipe_hw_caps; + const struct dpu_sspp_sub_blks *sblk; + + pipe_hw_caps = pipe->sspp->cap; + sblk = pipe->sspp->cap->sblk; + + /* + * We already have verified scaling against platform limitations. + * Now check if the SSPP supports scaling at all. + */ + if (!sblk->scaler_blk.len && + ((drm_rect_width(&new_plane_state->src) >> 16 != + drm_rect_width(&new_plane_state->dst)) || + (drm_rect_height(&new_plane_state->src) >> 16 != + drm_rect_height(&new_plane_state->dst)))) + return -ERANGE; + + fmt = msm_framebuffer_format(new_plane_state->fb); + + supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + + if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) + supported_rotations |= DRM_MODE_ROTATE_90; + + pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation, + supported_rotations); min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1; @@ -887,10 +915,9 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, return 0; } -static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp, - struct dpu_sw_pipe_cfg *pipe_cfg, - const struct msm_format *fmt, - uint32_t max_linewidth) +static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp, + struct dpu_sw_pipe_cfg *pipe_cfg, + const struct msm_format *fmt) { if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) || drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect)) @@ -902,10 +929,6 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp, if (MSM_FORMAT_IS_YUV(fmt)) return false; - if (MSM_FORMAT_IS_UBWC(fmt) && - drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2) - return false; - if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) && !test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features)) return false; @@ -913,6 +936,27 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp, return true; } +static int dpu_plane_is_parallel_capable(struct dpu_sw_pipe_cfg *pipe_cfg, + const struct msm_format *fmt, + uint32_t max_linewidth) +{ + if (MSM_FORMAT_IS_UBWC(fmt) && + drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2) + return false; + + return true; +} + +static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp, + struct dpu_sw_pipe_cfg *pipe_cfg, + const struct msm_format *fmt, + uint32_t max_linewidth) +{ + return dpu_plane_is_multirect_capable(sspp, pipe_cfg, fmt) && + dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth); +} + + static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, struct drm_atomic_state *state, const struct drm_crtc_state *crtc_state) @@ -923,47 +967,20 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); struct dpu_sw_pipe *pipe = &pstate->pipe; struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - const struct msm_format *fmt; struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; - uint32_t supported_rotations; - const struct dpu_sspp_cfg *pipe_hw_caps; - const struct dpu_sspp_sub_blks *sblk; int ret = 0; - pipe_hw_caps = pipe->sspp->cap; - sblk = pipe->sspp->cap->sblk; - - /* - * We already have verified scaling against platform limitations. - * Now check if the SSPP supports scaling at all. 
- */ - if (!sblk->scaler_blk.len && - ((drm_rect_width(&new_plane_state->src) >> 16 != - drm_rect_width(&new_plane_state->dst)) || - (drm_rect_height(&new_plane_state->src) >> 16 != - drm_rect_height(&new_plane_state->dst)))) - return -ERANGE; - - fmt = msm_framebuffer_format(new_plane_state->fb); - - supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; - - if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) - supported_rotations |= DRM_MODE_ROTATE_90; - - pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation, - supported_rotations); - r_pipe_cfg->rotation = pipe_cfg->rotation; - - ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, - &crtc_state->adjusted_mode); + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, + &crtc_state->adjusted_mode, + new_plane_state); if (ret) return ret; if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) { - ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt, - &crtc_state->adjusted_mode); + ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, + &crtc_state->adjusted_mode, + new_plane_state); if (ret) return ret; } @@ -1001,6 +1018,69 @@ static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dp return true; } +static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate, + struct dpu_plane_state *prev_adjacent_pstate, + const struct msm_format *fmt, + uint32_t max_linewidth) +{ + struct dpu_sw_pipe *pipe = &pstate->pipe; + struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; + struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; + struct dpu_sw_pipe *prev_pipe = &prev_adjacent_pstate->pipe; + struct dpu_sw_pipe_cfg *prev_pipe_cfg = &prev_adjacent_pstate->pipe_cfg; + const struct msm_format *prev_fmt = msm_framebuffer_format(prev_adjacent_pstate->base.fb); + u16 max_tile_height = 1; + + if (prev_adjacent_pstate->r_pipe.sspp != NULL || + prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE) + return false; + + if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) || + !dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt)) + return false; + + if (MSM_FORMAT_IS_UBWC(fmt)) + max_tile_height = max(max_tile_height, fmt->tile_height); + + if (MSM_FORMAT_IS_UBWC(prev_fmt)) + max_tile_height = max(max_tile_height, prev_fmt->tile_height); + + r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; + r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + + r_pipe->sspp = NULL; + + if (dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth) && + dpu_plane_is_parallel_capable(prev_pipe_cfg, prev_fmt, max_linewidth) && + (pipe_cfg->dst_rect.x1 >= prev_pipe_cfg->dst_rect.x2 || + prev_pipe_cfg->dst_rect.x1 >= pipe_cfg->dst_rect.x2)) { + pipe->sspp = prev_pipe->sspp; + + pipe->multirect_index = DPU_SSPP_RECT_1; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL; + + prev_pipe->multirect_index = DPU_SSPP_RECT_0; + prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL; + + return true; + } + + if (pipe_cfg->dst_rect.y1 >= prev_pipe_cfg->dst_rect.y2 + 2 * max_tile_height || + prev_pipe_cfg->dst_rect.y1 >= pipe_cfg->dst_rect.y2 + 2 * max_tile_height) { + pipe->sspp = prev_pipe->sspp; + + pipe->multirect_index = DPU_SSPP_RECT_1; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX; + + prev_pipe->multirect_index = DPU_SSPP_RECT_0; + prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX; + + return true; + } + + return false; +} + static int dpu_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -1059,6 +1139,9 @@ static int 
dpu_plane_virtual_atomic_check(struct drm_plane *plane, struct drm_crtc_state *crtc_state; int ret; + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + if (plane_state->crtc) crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); @@ -1098,13 +1181,14 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane, static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, struct dpu_global_state *global_state, struct drm_atomic_state *state, - struct drm_plane_state *plane_state) + struct drm_plane_state *plane_state, + struct drm_plane_state *prev_adjacent_plane_state) { const struct drm_crtc_state *crtc_state = NULL; struct drm_plane *plane = plane_state->plane; struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); struct dpu_rm_sspp_requirements reqs; - struct dpu_plane_state *pstate; + struct dpu_plane_state *pstate, *prev_adjacent_pstate; struct dpu_sw_pipe *pipe; struct dpu_sw_pipe *r_pipe; struct dpu_sw_pipe_cfg *pipe_cfg; @@ -1116,6 +1200,8 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, plane_state->crtc); pstate = to_dpu_plane_state(plane_state); + prev_adjacent_pstate = prev_adjacent_plane_state ? + to_dpu_plane_state(prev_adjacent_plane_state) : NULL; pipe = &pstate->pipe; r_pipe = &pstate->r_pipe; pipe_cfg = &pstate->pipe_cfg; @@ -1134,24 +1220,42 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc, reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation); - pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs); - if (!pipe->sspp) - return -ENODEV; + if (drm_rect_width(&r_pipe_cfg->src_rect) == 0) { + if (!prev_adjacent_pstate || + !dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate, fmt, + dpu_kms->catalog->caps->max_linewidth)) { + pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs); + if (!pipe->sspp) + return -ENODEV; - if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg, - pipe->sspp, - msm_framebuffer_format(plane_state->fb), - dpu_kms->catalog->caps->max_linewidth)) { - /* multirect is not possible, use two SSPP blocks */ - r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs); - if (!r_pipe->sspp) + r_pipe->sspp = NULL; + + pipe->multirect_index = DPU_SSPP_RECT_SOLO; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + + r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; + r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + } + } else { + pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs); + if (!pipe->sspp) return -ENODEV; - pipe->multirect_index = DPU_SSPP_RECT_SOLO; - pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg, + pipe->sspp, + msm_framebuffer_format(plane_state->fb), + dpu_kms->catalog->caps->max_linewidth)) { + /* multirect is not possible, use two SSPP blocks */ + r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs); + if (!r_pipe->sspp) + return -ENODEV; - r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; - r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + pipe->multirect_index = DPU_SSPP_RECT_SOLO; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + + r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; + r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + } } return dpu_plane_atomic_check_sspp(plane, state, crtc_state); @@ -1164,7 +1268,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state, unsigned int num_planes) { unsigned int i; - int ret; + struct 
drm_plane_state *prev_adjacent_plane_state = NULL; for (i = 0; i < num_planes; i++) { struct drm_plane_state *plane_state = states[i]; @@ -1173,13 +1277,16 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state, !plane_state->visible) continue; - ret = dpu_plane_virtual_assign_resources(crtc, global_state, - state, plane_state); + int ret = dpu_plane_virtual_assign_resources(crtc, global_state, + state, plane_state, + prev_adjacent_plane_state); if (ret) break; + + prev_adjacent_plane_state = plane_state; } - return ret; + return 0; } static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 5baf9df702b8..2e296f79cba1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -22,9 +22,9 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx, - uint32_t enc_id) + uint32_t crtc_id) { - return res_map[idx] && res_map[idx] != enc_id; + return res_map[idx] && res_map[idx] != crtc_id; } /** @@ -53,6 +53,8 @@ int dpu_rm_init(struct drm_device *dev, /* Clear, setup lists */ memset(rm, 0, sizeof(*rm)); + rm->has_legacy_ctls = (cat->mdss_ver->core_major_ver < 5); + /* Interrogate HW catalog and create tracking items for hw blocks */ for (i = 0; i < cat->mixer_count; i++) { struct dpu_hw_mixer *hw; @@ -233,13 +235,66 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx) return -EINVAL; } +static int _dpu_rm_reserve_cwb_mux_and_pingpongs(struct dpu_rm *rm, + struct dpu_global_state *global_state, + uint32_t crtc_id, + struct msm_display_topology *topology) +{ + int num_cwb_mux = topology->num_lm, cwb_mux_count = 0; + int cwb_pp_start_idx = PINGPONG_CWB_0 - PINGPONG_0; + int cwb_pp_idx[MAX_BLOCKS]; + int cwb_mux_idx[MAX_BLOCKS]; + + /* + * Reserve additional dedicated CWB PINGPONG blocks and muxes for each + * mixer + * + * TODO: add support reserving resources for platforms with no + * PINGPONG_CWB + */ + for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) && + cwb_mux_count < num_cwb_mux; i++) { + for (int j = 0; j < ARRAY_SIZE(rm->cwb_blks); j++) { + /* + * Odd LMs must be assigned to odd CWB muxes and even + * LMs with even CWB muxes. + * + * Since the RM HW block array index is based on the HW + * block ids, we can also use the array index to enforce + * the odd/even rule. See dpu_rm_init() for more + * information + */ + if (reserved_by_other(global_state->cwb_to_crtc_id, j, crtc_id) || + i % 2 != j % 2) + continue; + + cwb_mux_idx[cwb_mux_count] = j; + cwb_pp_idx[cwb_mux_count] = j + cwb_pp_start_idx; + cwb_mux_count++; + break; + } + } + + if (cwb_mux_count != num_cwb_mux) { + DPU_ERROR("Unable to reserve all CWB PINGPONGs\n"); + return -ENAVAIL; + } + + for (int i = 0; i < cwb_mux_count; i++) { + global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id; + global_state->cwb_to_crtc_id[cwb_mux_idx[i]] = crtc_id; + } + + return 0; +} + /** * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets * proposed use case requirements, incl. 
hardwired dependent blocks like * pingpong * @rm: dpu resource manager handle * @global_state: resources shared across multiple kms objects - * @enc_id: encoder id requesting for allocation + * @crtc_id: crtc id requesting for allocation * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks * if lm, and all other hardwired blocks connected to the lm (pp) is * available and appropriate @@ -252,14 +307,14 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx) */ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, struct dpu_global_state *global_state, - uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx, + uint32_t crtc_id, int lm_idx, int *pp_idx, int *dspp_idx, struct msm_display_topology *topology) { const struct dpu_lm_cfg *lm_cfg; int idx; /* Already reserved? */ - if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) { + if (reserved_by_other(global_state->mixer_to_crtc_id, lm_idx, crtc_id)) { DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0); return false; } @@ -271,7 +326,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, return false; } - if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) { + if (reserved_by_other(global_state->pingpong_to_crtc_id, idx, crtc_id)) { DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id, lm_cfg->pingpong); return false; @@ -287,7 +342,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, return false; } - if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) { + if (reserved_by_other(global_state->dspp_to_crtc_id, idx, crtc_id)) { DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id, lm_cfg->dspp); return false; @@ -299,7 +354,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, static int _dpu_rm_reserve_lms(struct dpu_rm *rm, struct dpu_global_state *global_state, - uint32_t enc_id, + uint32_t crtc_id, struct msm_display_topology *topology) { @@ -323,7 +378,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, lm_idx[lm_count] = i; if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, - enc_id, i, &pp_idx[lm_count], + crtc_id, i, &pp_idx[lm_count], &dspp_idx[lm_count], topology)) { continue; } @@ -342,7 +397,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, continue; if (!_dpu_rm_check_lm_and_get_connected_blks(rm, - global_state, enc_id, j, + global_state, crtc_id, j, &pp_idx[lm_count], &dspp_idx[lm_count], topology)) { continue; @@ -359,12 +414,12 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, } for (i = 0; i < lm_count; i++) { - global_state->mixer_to_enc_id[lm_idx[i]] = enc_id; - global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id; - global_state->dspp_to_enc_id[dspp_idx[i]] = - topology->num_dspp ? enc_id : 0; + global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id; + global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id; + global_state->dspp_to_crtc_id[dspp_idx[i]] = + topology->num_dspp ? 
crtc_id : 0; - trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id, + trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, crtc_id, pp_idx[i] + PINGPONG_0); } @@ -374,17 +429,26 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, static int _dpu_rm_reserve_ctls( struct dpu_rm *rm, struct dpu_global_state *global_state, - uint32_t enc_id, + uint32_t crtc_id, const struct msm_display_topology *top) { int ctl_idx[MAX_BLOCKS]; int i = 0, j, num_ctls; bool needs_split_display; - /* each hw_intf needs its own hw_ctrl to program its control path */ - num_ctls = top->num_intf; - - needs_split_display = _dpu_rm_needs_split_display(top); + if (rm->has_legacy_ctls) { + /* + * TODO: check if there is a need for special handling if + * DPU < 5.0 get CWB support. + */ + num_ctls = top->num_intf; + + needs_split_display = _dpu_rm_needs_split_display(top); + } else { + /* use single CTL */ + num_ctls = 1; + needs_split_display = false; + } for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) { const struct dpu_hw_ctl *ctl; @@ -393,7 +457,7 @@ static int _dpu_rm_reserve_ctls( if (!rm->ctl_blks[j]) continue; - if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id)) + if (reserved_by_other(global_state->ctl_to_crtc_id, j, crtc_id)) continue; ctl = to_dpu_hw_ctl(rm->ctl_blks[j]); @@ -417,8 +481,8 @@ static int _dpu_rm_reserve_ctls( return -ENAVAIL; for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) { - global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id; - trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id); + global_state->ctl_to_crtc_id[ctl_idx[i]] = crtc_id; + trace_dpu_rm_reserve_ctls(i + CTL_0, crtc_id); } return 0; @@ -426,12 +490,12 @@ static int _dpu_rm_reserve_ctls( static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state, int start, - uint32_t enc_id) + uint32_t crtc_id) { int i; for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) { - if (global_state->pingpong_to_enc_id[i] == enc_id) + if (global_state->pingpong_to_crtc_id[i] == crtc_id) return i; } @@ -452,7 +516,7 @@ static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx) static int _dpu_rm_dsc_alloc(struct dpu_rm *rm, struct dpu_global_state *global_state, - uint32_t enc_id, + uint32_t crtc_id, const struct msm_display_topology *top) { int num_dsc = 0; @@ -465,10 +529,10 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm, if (!rm->dsc_blks[dsc_idx]) continue; - if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id)) + if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id)) continue; - pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id); + pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id); if (pp_idx < 0) return -ENAVAIL; @@ -476,7 +540,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm, if (ret) return -ENAVAIL; - global_state->dsc_to_enc_id[dsc_idx] = enc_id; + global_state->dsc_to_crtc_id[dsc_idx] = crtc_id; num_dsc++; pp_idx++; } @@ -492,7 +556,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm, static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm, struct dpu_global_state *global_state, - uint32_t enc_id, + uint32_t crtc_id, const struct msm_display_topology *top) { int num_dsc = 0; @@ -507,11 +571,11 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm, continue; /* consective dsc index to be paired */ - if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) || - reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id)) + if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id) || + 
reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx + 1, crtc_id)) continue; - pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id); + pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id); if (pp_idx < 0) return -ENAVAIL; @@ -521,7 +585,7 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm, continue; } - pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id); + pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, crtc_id); if (pp_idx < 0) return -ENAVAIL; @@ -531,8 +595,8 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm, continue; } - global_state->dsc_to_enc_id[dsc_idx] = enc_id; - global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id; + global_state->dsc_to_crtc_id[dsc_idx] = crtc_id; + global_state->dsc_to_crtc_id[dsc_idx + 1] = crtc_id; num_dsc += 2; pp_idx++; /* start for next pair */ } @@ -548,11 +612,9 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm, static int _dpu_rm_reserve_dsc(struct dpu_rm *rm, struct dpu_global_state *global_state, - struct drm_encoder *enc, + uint32_t crtc_id, const struct msm_display_topology *top) { - uint32_t enc_id = enc->base.id; - if (!top->num_dsc || !top->num_intf) return 0; @@ -568,16 +630,17 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm, /* num_dsc should be either 1, 2 or 4 */ if (top->num_dsc > top->num_intf) /* merge mode */ - return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top); + return _dpu_rm_dsc_alloc_pair(rm, global_state, crtc_id, top); else - return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top); + return _dpu_rm_dsc_alloc(rm, global_state, crtc_id, top); return 0; } static int _dpu_rm_reserve_cdm(struct dpu_rm *rm, struct dpu_global_state *global_state, - struct drm_encoder *enc) + uint32_t crtc_id, + int num_cdm) { /* try allocating only one CDM block */ if (!rm->cdm_blk) { @@ -585,12 +648,17 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm, return -EIO; } - if (global_state->cdm_to_enc_id) { + if (num_cdm > 1) { + DPU_ERROR("More than 1 INTF requesting CDM\n"); + return -EINVAL; + } + + if (global_state->cdm_to_crtc_id) { DPU_ERROR("CDM_0 is already allocated\n"); return -EIO; } - global_state->cdm_to_enc_id = enc->base.id; + global_state->cdm_to_crtc_id = crtc_id; return 0; } @@ -598,30 +666,37 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm, static int _dpu_rm_make_reservation( struct dpu_rm *rm, struct dpu_global_state *global_state, - struct drm_encoder *enc, + uint32_t crtc_id, struct msm_display_topology *topology) { int ret; - ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology); + ret = _dpu_rm_reserve_lms(rm, global_state, crtc_id, topology); if (ret) { DPU_ERROR("unable to find appropriate mixers\n"); return ret; } - ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id, + if (topology->cwb_enabled) { + ret = _dpu_rm_reserve_cwb_mux_and_pingpongs(rm, global_state, + crtc_id, topology); + if (ret) + return ret; + } + + ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id, topology); if (ret) { DPU_ERROR("unable to find appropriate CTL\n"); return ret; } - ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology); + ret = _dpu_rm_reserve_dsc(rm, global_state, crtc_id, topology); if (ret) return ret; - if (topology->needs_cdm) { - ret = _dpu_rm_reserve_cdm(rm, global_state, enc); + if (topology->num_cdm > 0) { + ret = _dpu_rm_reserve_cdm(rm, global_state, crtc_id, topology->num_cdm); if (ret) { DPU_ERROR("unable to find CDM blk\n"); return ret; @@ -632,12 +707,12 @@ static int _dpu_rm_make_reservation( } static void 
_dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, - uint32_t enc_id) + uint32_t crtc_id) { int i; for (i = 0; i < cnt; i++) { - if (res_mapping[i] == enc_id) + if (res_mapping[i] == crtc_id) res_mapping[i] = 0; } } @@ -646,23 +721,27 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, * dpu_rm_release - Given the encoder for the display chain, release any * HW blocks previously reserved for that use case. * @global_state: resources shared across multiple kms objects - * @enc: DRM Encoder handle + * @crtc: DRM CRTC handle * @return: 0 on Success otherwise -ERROR */ void dpu_rm_release(struct dpu_global_state *global_state, - struct drm_encoder *enc) + struct drm_crtc *crtc) { - _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id, - ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id); - _dpu_rm_clear_mapping(global_state->mixer_to_enc_id, - ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id); - _dpu_rm_clear_mapping(global_state->ctl_to_enc_id, - ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); - _dpu_rm_clear_mapping(global_state->dsc_to_enc_id, - ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id); - _dpu_rm_clear_mapping(global_state->dspp_to_enc_id, - ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id); - _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id); + uint32_t crtc_id = crtc->base.id; + + _dpu_rm_clear_mapping(global_state->pingpong_to_crtc_id, + ARRAY_SIZE(global_state->pingpong_to_crtc_id), crtc_id); + _dpu_rm_clear_mapping(global_state->mixer_to_crtc_id, + ARRAY_SIZE(global_state->mixer_to_crtc_id), crtc_id); + _dpu_rm_clear_mapping(global_state->ctl_to_crtc_id, + ARRAY_SIZE(global_state->ctl_to_crtc_id), crtc_id); + _dpu_rm_clear_mapping(global_state->dsc_to_crtc_id, + ARRAY_SIZE(global_state->dsc_to_crtc_id), crtc_id); + _dpu_rm_clear_mapping(global_state->dspp_to_crtc_id, + ARRAY_SIZE(global_state->dspp_to_crtc_id), crtc_id); + _dpu_rm_clear_mapping(&global_state->cdm_to_crtc_id, 1, crtc_id); + _dpu_rm_clear_mapping(global_state->cwb_to_crtc_id, + ARRAY_SIZE(global_state->cwb_to_crtc_id), crtc_id); } /** @@ -674,42 +753,33 @@ void dpu_rm_release(struct dpu_global_state *global_state, * HW Reservations should be released via dpu_rm_release_hw. 
* @rm: DPU Resource Manager handle * @global_state: resources shared across multiple kms objects - * @enc: DRM Encoder handle - * @crtc_state: Proposed Atomic DRM CRTC State handle + * @crtc: DRM CRTC handle * @topology: Pointer to topology info for the display * @return: 0 on Success otherwise -ERROR */ int dpu_rm_reserve( struct dpu_rm *rm, struct dpu_global_state *global_state, - struct drm_encoder *enc, - struct drm_crtc_state *crtc_state, + struct drm_crtc *crtc, struct msm_display_topology *topology) { int ret; - /* Check if this is just a page-flip */ - if (!drm_atomic_crtc_needs_modeset(crtc_state)) - return 0; - if (IS_ERR(global_state)) { DPU_ERROR("failed to global state\n"); return PTR_ERR(global_state); } - DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n", - enc->base.id, crtc_state->crtc->base.id); + DRM_DEBUG_KMS("reserving hw for crtc %d\n", crtc->base.id); DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n", topology->num_lm, topology->num_dsc, topology->num_intf); - ret = _dpu_rm_make_reservation(rm, global_state, enc, topology); + ret = _dpu_rm_make_reservation(rm, global_state, crtc->base.id, topology); if (ret) DPU_ERROR("failed to reserve hw resources: %d\n", ret); - - return ret; } @@ -800,50 +870,57 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state, * assigned to this encoder * @rm: DPU Resource Manager handle * @global_state: resources shared across multiple kms objects - * @enc_id: encoder id requesting for allocation + * @crtc: DRM CRTC handle * @type: resource type to return data for * @blks: pointer to the array to be filled by HW resources * @blks_size: size of the @blks array */ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, - struct dpu_global_state *global_state, uint32_t enc_id, + struct dpu_global_state *global_state, struct drm_crtc *crtc, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) { + uint32_t crtc_id = crtc->base.id; struct dpu_hw_blk **hw_blks; - uint32_t *hw_to_enc_id; + uint32_t *hw_to_crtc_id; int i, num_blks, max_blks; switch (type) { case DPU_HW_BLK_PINGPONG: + case DPU_HW_BLK_DCWB_PINGPONG: hw_blks = rm->pingpong_blks; - hw_to_enc_id = global_state->pingpong_to_enc_id; + hw_to_crtc_id = global_state->pingpong_to_crtc_id; max_blks = ARRAY_SIZE(rm->pingpong_blks); break; case DPU_HW_BLK_LM: hw_blks = rm->mixer_blks; - hw_to_enc_id = global_state->mixer_to_enc_id; + hw_to_crtc_id = global_state->mixer_to_crtc_id; max_blks = ARRAY_SIZE(rm->mixer_blks); break; case DPU_HW_BLK_CTL: hw_blks = rm->ctl_blks; - hw_to_enc_id = global_state->ctl_to_enc_id; + hw_to_crtc_id = global_state->ctl_to_crtc_id; max_blks = ARRAY_SIZE(rm->ctl_blks); break; case DPU_HW_BLK_DSPP: hw_blks = rm->dspp_blks; - hw_to_enc_id = global_state->dspp_to_enc_id; + hw_to_crtc_id = global_state->dspp_to_crtc_id; max_blks = ARRAY_SIZE(rm->dspp_blks); break; case DPU_HW_BLK_DSC: hw_blks = rm->dsc_blks; - hw_to_enc_id = global_state->dsc_to_enc_id; + hw_to_crtc_id = global_state->dsc_to_crtc_id; max_blks = ARRAY_SIZE(rm->dsc_blks); break; case DPU_HW_BLK_CDM: hw_blks = &rm->cdm_blk; - hw_to_enc_id = &global_state->cdm_to_enc_id; + hw_to_crtc_id = &global_state->cdm_to_crtc_id; max_blks = 1; break; + case DPU_HW_BLK_CWB: + hw_blks = rm->cwb_blks; + hw_to_crtc_id = global_state->cwb_to_crtc_id; + max_blks = ARRAY_SIZE(rm->cwb_blks); + break; default: DPU_ERROR("blk type %d not managed by rm\n", type); return 0; @@ -851,17 +928,31 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, num_blks = 0; for (i = 0; i < max_blks; i++) { - 
if (hw_to_enc_id[i] != enc_id) + if (hw_to_crtc_id[i] != crtc_id) continue; + if (type == DPU_HW_BLK_PINGPONG) { + struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]); + + if (pp->idx >= PINGPONG_CWB_0) + continue; + } + + if (type == DPU_HW_BLK_DCWB_PINGPONG) { + struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]); + + if (pp->idx < PINGPONG_CWB_0) + continue; + } + if (num_blks == blks_size) { - DPU_ERROR("More than %d resources assigned to enc %d\n", - blks_size, enc_id); + DPU_ERROR("More than %d resources assigned to crtc %d\n", + blks_size, crtc_id); break; } if (!hw_blks[i]) { - DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n", - type, enc_id); + DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n", + type, crtc_id); break; } blks[num_blks++] = hw_blks[i]; @@ -896,38 +987,38 @@ void dpu_rm_print_state(struct drm_printer *p, drm_puts(p, "resource mapping:\n"); drm_puts(p, "\tpingpong="); - for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++) + for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_crtc_id); i++) dpu_rm_print_state_helper(p, rm->pingpong_blks[i], - global_state->pingpong_to_enc_id[i]); + global_state->pingpong_to_crtc_id[i]); drm_puts(p, "\n"); drm_puts(p, "\tmixer="); - for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++) + for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_crtc_id); i++) dpu_rm_print_state_helper(p, rm->mixer_blks[i], - global_state->mixer_to_enc_id[i]); + global_state->mixer_to_crtc_id[i]); drm_puts(p, "\n"); drm_puts(p, "\tctl="); - for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++) + for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_crtc_id); i++) dpu_rm_print_state_helper(p, rm->ctl_blks[i], - global_state->ctl_to_enc_id[i]); + global_state->ctl_to_crtc_id[i]); drm_puts(p, "\n"); drm_puts(p, "\tdspp="); - for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++) + for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_crtc_id); i++) dpu_rm_print_state_helper(p, rm->dspp_blks[i], - global_state->dspp_to_enc_id[i]); + global_state->dspp_to_crtc_id[i]); drm_puts(p, "\n"); drm_puts(p, "\tdsc="); - for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++) + for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_crtc_id); i++) dpu_rm_print_state_helper(p, rm->dsc_blks[i], - global_state->dsc_to_enc_id[i]); + global_state->dsc_to_crtc_id[i]); drm_puts(p, "\n"); drm_puts(p, "\tcdm="); dpu_rm_print_state_helper(p, rm->cdm_blk, - global_state->cdm_to_enc_id); + global_state->cdm_to_crtc_id); drm_puts(p, "\n"); drm_puts(p, "\tsspp="); @@ -936,4 +1027,10 @@ void dpu_rm_print_state(struct drm_printer *p, dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL, global_state->sspp_to_crtc_id[i]); drm_puts(p, "\n"); + + drm_puts(p, "\tcwb="); + for (i = 0; i < ARRAY_SIZE(global_state->cwb_to_crtc_id); i++) + dpu_rm_print_state_helper(p, rm->cwb_blks[i], + global_state->cwb_to_crtc_id[i]); + drm_puts(p, "\n"); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index 99bd594ee0d1..aa62966056d4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -24,6 +24,7 @@ struct dpu_global_state; * @dspp_blks: array of dspp hardware resources * @hw_sspp: array of sspp hardware resources * @cdm_blk: cdm hardware resource + * @has_legacy_ctls: DPU uses pre-ACTIVE CTL blocks. 
*/ struct dpu_rm { struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0]; @@ -37,6 +38,7 @@ struct dpu_rm { struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0]; struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE]; struct dpu_hw_blk *cdm_blk; + bool has_legacy_ctls; }; struct dpu_rm_sspp_requirements { @@ -51,14 +53,17 @@ struct dpu_rm_sspp_requirements { * @num_intf: number of interfaces the panel is mounted on * @num_dspp: number of dspp blocks used * @num_dsc: number of Display Stream Compression (DSC) blocks used - * @needs_cdm: indicates whether cdm block is needed for this display topology + * @num_cdm: indicates how many outputs are requesting cdm block for + * this display topology + * @cwb_enabled: indicates whether CWB is enabled for this display topology */ struct msm_display_topology { u32 num_lm; u32 num_intf; u32 num_dspp; u32 num_dsc; - bool needs_cdm; + int num_cdm; + bool cwb_enabled; }; int dpu_rm_init(struct drm_device *dev, @@ -69,12 +74,11 @@ int dpu_rm_init(struct drm_device *dev, int dpu_rm_reserve(struct dpu_rm *rm, struct dpu_global_state *global_state, - struct drm_encoder *drm_enc, - struct drm_crtc_state *crtc_state, + struct drm_crtc *crtc, struct msm_display_topology *topology); void dpu_rm_release(struct dpu_global_state *global_state, - struct drm_encoder *enc); + struct drm_crtc *crtc); struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm, struct dpu_global_state *global_state, @@ -85,7 +89,7 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state, struct drm_crtc *crtc); int dpu_rm_get_assigned_resources(struct dpu_rm *rm, - struct dpu_global_state *global_state, uint32_t enc_id, + struct dpu_global_state *global_state, struct drm_crtc *crtc, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); void dpu_rm_print_state(struct drm_printer *p, diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index c469e66cfc11..7e942c1337b3 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -6,6 +6,8 @@ #include <linux/delay.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_vblank.h> #include "msm_drv.h" @@ -189,7 +191,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, struct msm_drm_private *priv = dev->dev_private; struct drm_encoder *encoder; struct drm_connector *connector; - struct device_node *panel_node; + struct drm_bridge *next_bridge; int dsi_id; int ret; @@ -199,27 +201,43 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, * bail out early if there is no panel node (no need to * initialize LCDC encoder and LVDS connector) */ - panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0); - if (!panel_node) - return 0; + next_bridge = devm_drm_of_get_bridge(dev->dev, dev->dev->of_node, 0, 0); + if (IS_ERR(next_bridge)) { + ret = PTR_ERR(next_bridge); + if (ret == -ENODEV) + return 0; + return ret; + } - encoder = mdp4_lcdc_encoder_init(dev, panel_node); + encoder = mdp4_lcdc_encoder_init(dev); if (IS_ERR(encoder)) { DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n"); - of_node_put(panel_node); return PTR_ERR(encoder); } /* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) 
*/ encoder->possible_crtcs = 1 << DMA_P; - connector = mdp4_lvds_connector_init(dev, panel_node, encoder); + ret = drm_bridge_attach(encoder, next_bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to attach LVDS panel/bridge: %d\n", ret); + + return ret; + } + + connector = drm_bridge_connector_init(dev, encoder); if (IS_ERR(connector)) { DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n"); - of_node_put(panel_node); return PTR_ERR(connector); } + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to attach LVDS connector: %d\n", ret); + + return ret; + } + break; case DRM_MODE_ENCODER_TMDS: encoder = mdp4_dtv_encoder_init(dev); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h index 94b1ba92785f..f9d988076337 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h @@ -191,12 +191,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate); struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev); -long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate); -struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, - struct device_node *panel_node); - -struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, - struct device_node *panel_node, struct drm_encoder *encoder); +struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev); #ifdef CONFIG_DRM_MSM_DSI struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev); @@ -207,13 +202,6 @@ static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) } #endif -#ifdef CONFIG_COMMON_CLK -struct clk *mpd4_lvds_pll_init(struct drm_device *dev); -#else -static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev) -{ - return ERR_PTR(-ENODEV); -} -#endif +struct clk *mpd4_get_lcdc_clock(struct drm_device *dev); #endif /* __MDP4_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c index 8bbc7fb881d5..06a307c1272d 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c @@ -14,7 +14,6 @@ struct mdp4_lcdc_encoder { struct drm_encoder base; - struct device_node *panel_node; struct drm_panel *panel; struct clk *lcdc_clk; unsigned long int pixclock; @@ -262,19 +261,12 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = to_mdp4_lcdc_encoder(encoder); struct mdp4_kms *mdp4_kms = get_kms(encoder); - struct drm_panel *panel; if (WARN_ON(!mdp4_lcdc_encoder->enabled)) return; mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); - panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); - if (!IS_ERR(panel)) { - drm_panel_disable(panel); - drm_panel_unprepare(panel); - } - /* * Wait for a vsync so we know the ENABLE=0 latched before * the (connector) source of the vsync's gets disabled, @@ -300,7 +292,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) to_mdp4_lcdc_encoder(encoder); unsigned long pc = mdp4_lcdc_encoder->pixclock; struct mdp4_kms *mdp4_kms = get_kms(encoder); - struct drm_panel *panel; uint32_t config; int ret; @@ -335,12 +326,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) if (ret) DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret); - 
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); - if (!IS_ERR(panel)) { - drm_panel_prepare(panel); - drm_panel_enable(panel); - } - setup_phy(encoder); mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1); @@ -348,22 +333,34 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) mdp4_lcdc_encoder->enabled = true; } +static enum drm_mode_status +mdp4_lcdc_encoder_mode_valid(struct drm_encoder *encoder, + const struct drm_display_mode *mode) +{ + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + long actual, requested; + + requested = 1000 * mode->clock; + actual = clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, requested); + + DBG("requested=%ld, actual=%ld", requested, actual); + + if (actual != requested) + return MODE_CLOCK_RANGE; + + return MODE_OK; +} + static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = { .mode_set = mdp4_lcdc_encoder_mode_set, .disable = mdp4_lcdc_encoder_disable, .enable = mdp4_lcdc_encoder_enable, + .mode_valid = mdp4_lcdc_encoder_mode_valid, }; -long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate) -{ - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate); -} - /* initialize encoder */ -struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, - struct device_node *panel_node) +struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev) { struct drm_encoder *encoder; struct mdp4_lcdc_encoder *mdp4_lcdc_encoder; @@ -374,14 +371,11 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, if (IS_ERR(mdp4_lcdc_encoder)) return ERR_CAST(mdp4_lcdc_encoder); - mdp4_lcdc_encoder->panel_node = panel_node; - encoder = &mdp4_lcdc_encoder->base; drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs); - /* TODO: do we need different pll in other cases? */ - mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev); + mdp4_lcdc_encoder->lcdc_clk = mpd4_get_lcdc_clock(dev); if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) { DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n"); return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c deleted file mode 100644 index 7444b75c4215..000000000000 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c +++ /dev/null @@ -1,121 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2014 Red Hat - * Author: Rob Clark <robdclark@gmail.com> - * Author: Vinay Simha <vinaysimha@inforcecomputing.com> - */ - -#include "mdp4_kms.h" - -struct mdp4_lvds_connector { - struct drm_connector base; - struct drm_encoder *encoder; - struct device_node *panel_node; - struct drm_panel *panel; -}; -#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base) - -static enum drm_connector_status mdp4_lvds_connector_detect( - struct drm_connector *connector, bool force) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - - if (!mdp4_lvds_connector->panel) { - mdp4_lvds_connector->panel = - of_drm_find_panel(mdp4_lvds_connector->panel_node); - if (IS_ERR(mdp4_lvds_connector->panel)) - mdp4_lvds_connector->panel = NULL; - } - - return mdp4_lvds_connector->panel ? 
- connector_status_connected : - connector_status_disconnected; -} - -static void mdp4_lvds_connector_destroy(struct drm_connector *connector) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - - drm_connector_cleanup(connector); - - kfree(mdp4_lvds_connector); -} - -static int mdp4_lvds_connector_get_modes(struct drm_connector *connector) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - struct drm_panel *panel = mdp4_lvds_connector->panel; - int ret = 0; - - if (panel) - ret = drm_panel_get_modes(panel, connector); - - return ret; -} - -static enum drm_mode_status -mdp4_lvds_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - struct drm_encoder *encoder = mdp4_lvds_connector->encoder; - long actual, requested; - - requested = 1000 * mode->clock; - actual = mdp4_lcdc_round_pixclk(encoder, requested); - - DBG("requested=%ld, actual=%ld", requested, actual); - - if (actual != requested) - return MODE_CLOCK_RANGE; - - return MODE_OK; -} - -static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { - .detect = mdp4_lvds_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = mdp4_lvds_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { - .get_modes = mdp4_lvds_connector_get_modes, - .mode_valid = mdp4_lvds_connector_mode_valid, -}; - -/* initialize connector */ -struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, - struct device_node *panel_node, struct drm_encoder *encoder) -{ - struct drm_connector *connector = NULL; - struct mdp4_lvds_connector *mdp4_lvds_connector; - - mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL); - if (!mdp4_lvds_connector) - return ERR_PTR(-ENOMEM); - - mdp4_lvds_connector->encoder = encoder; - mdp4_lvds_connector->panel_node = panel_node; - - connector = &mdp4_lvds_connector->base; - - drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); - drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs); - - connector->polled = 0; - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - drm_connector_attach_encoder(connector, encoder); - - return connector; -} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c index ab8c0c187fb2..fa2c29470510 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c @@ -122,40 +122,59 @@ static const struct clk_ops mpd4_lvds_pll_ops = { .set_rate = mpd4_lvds_pll_set_rate, }; -static const char *mpd4_lvds_pll_parents[] = { - "pxo", +static const struct clk_parent_data mpd4_lvds_pll_parents[] = { + { .fw_name = "pxo", .name = "pxo", }, }; static struct clk_init_data pll_init = { .name = "mpd4_lvds_pll", .ops = &mpd4_lvds_pll_ops, - .parent_names = mpd4_lvds_pll_parents, + .parent_data = mpd4_lvds_pll_parents, .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents), }; -struct clk *mpd4_lvds_pll_init(struct drm_device *dev) +static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev) { struct mdp4_lvds_pll *lvds_pll; - struct 
clk *clk; int ret; lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL); - if (!lvds_pll) { - ret = -ENOMEM; - goto fail; - } + if (!lvds_pll) + return ERR_PTR(-ENOMEM); lvds_pll->dev = dev; lvds_pll->pll_hw.init = &pll_init; - clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - goto fail; + ret = devm_clk_hw_register(dev->dev, &lvds_pll->pll_hw); + if (ret) + return ERR_PTR(ret); + + ret = devm_of_clk_add_hw_provider(dev->dev, of_clk_hw_simple_get, &lvds_pll->pll_hw); + if (ret) + return ERR_PTR(ret); + + return &lvds_pll->pll_hw; +} + +struct clk *mpd4_get_lcdc_clock(struct drm_device *dev) +{ + struct clk_hw *hw; + struct clk *clk; + + + /* TODO: do we need different pll in other cases? */ + hw = mpd4_lvds_pll_init(dev); + if (IS_ERR(hw)) { + DRM_DEV_ERROR(dev->dev, "failed to register LVDS PLL\n"); + return ERR_CAST(hw); } - return clk; + clk = devm_clk_get(dev->dev, "lcdc_clk"); + if (clk == ERR_PTR(-ENOENT)) { + drm_warn(dev, "can't get LCDC clock, using PLL directly\n"); -fail: - return ERR_PTR(ret); + return devm_clk_hw_get_clk(dev->dev, hw, "lcdc_clk"); + } + + return clk; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c index 666de99a46a5..fc183fe37f56 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -3,6 +3,7 @@ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. */ +#include <linux/string_choices.h> #include "mdp5_kms.h" #include "mdp5_ctl.h" @@ -233,7 +234,7 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, return -EINVAL; ctl->encoder_enabled = enabled; - DBG("intf_%d: %s", intf->num, enabled ? "on" : "off"); + DBG("intf_%d: %s", intf->num, str_on_off(enabled)); if (start_signal_needed(ctl, pipeline)) { send_start_signal(ctl); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 62de248ed1b0..bb1601921938 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -368,7 +368,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, } static int mdp5_plane_atomic_async_check(struct drm_plane *plane, - struct drm_atomic_state *state) + struct drm_atomic_state *state, bool flip) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index 70fdc9fe228a..f8bfb908f9b4 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -13,13 +13,13 @@ #include "dp_catalog.h" #include "dp_audio.h" +#include "dp_drm.h" #include "dp_panel.h" #include "dp_reg.h" #include "dp_display.h" #include "dp_utils.h" struct msm_dp_audio_private { - struct platform_device *audio_pdev; struct platform_device *pdev; struct drm_device *drm_dev; struct msm_dp_catalog *catalog; @@ -160,24 +160,11 @@ static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable) msm_dp_catalog_audio_enable(catalog, enable); } -static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev) +static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_display) { struct msm_dp_audio *msm_dp_audio; - struct msm_dp *msm_dp_display; - - if (!pdev) { - DRM_ERROR("invalid input\n"); - return ERR_PTR(-ENODEV); - } - - msm_dp_display = platform_get_drvdata(pdev); - if (!msm_dp_display) { - DRM_ERROR("invalid input\n"); - return 
ERR_PTR(-ENODEV); - } msm_dp_audio = msm_dp_display->msm_dp_audio; - if (!msm_dp_audio) { DRM_ERROR("invalid msm_dp_audio data\n"); return ERR_PTR(-EINVAL); @@ -186,68 +173,16 @@ static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); } -static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data, - hdmi_codec_plugged_cb fn, - struct device *codec_dev) -{ - - struct platform_device *pdev; - struct msm_dp *msm_dp_display; - - pdev = to_platform_device(dev); - if (!pdev) { - pr_err("invalid input\n"); - return -ENODEV; - } - - msm_dp_display = platform_get_drvdata(pdev); - if (!msm_dp_display) { - pr_err("invalid input\n"); - return -ENODEV; - } - - return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev); -} - -static int msm_dp_audio_get_eld(struct device *dev, - void *data, uint8_t *buf, size_t len) -{ - struct platform_device *pdev; - struct msm_dp *msm_dp_display; - - pdev = to_platform_device(dev); - - if (!pdev) { - DRM_ERROR("invalid input\n"); - return -ENODEV; - } - - msm_dp_display = platform_get_drvdata(pdev); - if (!msm_dp_display) { - DRM_ERROR("invalid input\n"); - return -ENODEV; - } - - mutex_lock(&msm_dp_display->connector->eld_mutex); - memcpy(buf, msm_dp_display->connector->eld, - min(sizeof(msm_dp_display->connector->eld), len)); - mutex_unlock(&msm_dp_display->connector->eld_mutex); - - return 0; -} - -int msm_dp_audio_hw_params(struct device *dev, - void *data, - struct hdmi_codec_daifmt *daifmt, - struct hdmi_codec_params *params) +int msm_dp_audio_prepare(struct drm_connector *connector, + struct drm_bridge *bridge, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) { int rc = 0; struct msm_dp_audio_private *audio; - struct platform_device *pdev; struct msm_dp *msm_dp_display; - pdev = to_platform_device(dev); - msm_dp_display = platform_get_drvdata(pdev); + msm_dp_display = to_dp_bridge(bridge)->msm_dp_display; /* * there could be cases where sound card can be opened even @@ -262,7 +197,7 @@ int msm_dp_audio_hw_params(struct device *dev, goto end; } - audio = msm_dp_audio_get_data(pdev); + audio = msm_dp_audio_get_data(msm_dp_display); if (IS_ERR(audio)) { rc = PTR_ERR(audio); goto end; @@ -281,15 +216,14 @@ end: return rc; } -static void msm_dp_audio_shutdown(struct device *dev, void *data) +void msm_dp_audio_shutdown(struct drm_connector *connector, + struct drm_bridge *bridge) { struct msm_dp_audio_private *audio; - struct platform_device *pdev; struct msm_dp *msm_dp_display; - pdev = to_platform_device(dev); - msm_dp_display = platform_get_drvdata(pdev); - audio = msm_dp_audio_get_data(pdev); + msm_dp_display = to_dp_bridge(bridge)->msm_dp_display; + audio = msm_dp_audio_get_data(msm_dp_display); if (IS_ERR(audio)) { DRM_ERROR("failed to get audio data\n"); return; @@ -311,47 +245,6 @@ static void msm_dp_audio_shutdown(struct device *dev, void *data) msm_dp_display_signal_audio_complete(msm_dp_display); } -static const struct hdmi_codec_ops msm_dp_audio_codec_ops = { - .hw_params = msm_dp_audio_hw_params, - .audio_shutdown = msm_dp_audio_shutdown, - .get_eld = msm_dp_audio_get_eld, - .hook_plugged_cb = msm_dp_audio_hook_plugged_cb, -}; - -static struct hdmi_codec_pdata codec_data = { - .ops = &msm_dp_audio_codec_ops, - .max_i2s_channels = 8, - .i2s = 1, -}; - -void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio) -{ - struct msm_dp_audio_private *audio_priv; - - audio_priv = 
container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); - - if (audio_priv->audio_pdev) { - platform_device_unregister(audio_priv->audio_pdev); - audio_priv->audio_pdev = NULL; - } -} - -int msm_dp_register_audio_driver(struct device *dev, - struct msm_dp_audio *msm_dp_audio) -{ - struct msm_dp_audio_private *audio_priv; - - audio_priv = container_of(msm_dp_audio, - struct msm_dp_audio_private, msm_dp_audio); - - audio_priv->audio_pdev = platform_device_register_data(dev, - HDMI_CODEC_DRV_NAME, - PLATFORM_DEVID_AUTO, - &codec_data, - sizeof(codec_data)); - return PTR_ERR_OR_ZERO(audio_priv->audio_pdev); -} - struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, struct msm_dp_catalog *catalog) { diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h index beea34cbab77..58fc14693e48 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.h +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -36,23 +36,6 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, struct msm_dp_catalog *catalog); /** - * msm_dp_register_audio_driver() - * - * Registers DP device with hdmi_codec interface. - * - * @dev: DP device instance. - * @msm_dp_audio: an instance of msm_dp_audio module. - * - * - * Returns the error code in case of failure, otherwise - * zero on success. - */ -int msm_dp_register_audio_driver(struct device *dev, - struct msm_dp_audio *msm_dp_audio); - -void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio); - -/** * msm_dp_audio_put() * * Cleans the msm_dp_audio instance. @@ -61,10 +44,12 @@ void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm */ void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio); -int msm_dp_audio_hw_params(struct device *dev, - void *data, - struct hdmi_codec_daifmt *daifmt, - struct hdmi_codec_params *params); +int msm_dp_audio_prepare(struct drm_connector *connector, + struct drm_bridge *bridge, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params); +void msm_dp_audio_shutdown(struct drm_connector *connector, + struct drm_bridge *bridge); #endif /* _DP_AUDIO_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 9c463ae2f8fa..a50bfafbb4ea 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -11,6 +11,7 @@ #include <linux/phy/phy.h> #include <linux/phy/phy-dp.h> #include <linux/pm_opp.h> +#include <linux/string_choices.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_fixed.h> @@ -1033,10 +1034,12 @@ static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl, return 0; } -static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl) +static int msm_dp_ctrl_update_phy_vx_px(struct msm_dp_ctrl_private *ctrl, + enum drm_dp_phy dp_phy) { struct msm_dp_link *link = ctrl->link; - int ret = 0, lane, lane_cnt; + int lane, lane_cnt, reg; + int ret = 0; u8 buf[4]; u32 max_level_reached = 0; u32 voltage_swing_level = link->phy_params.v_level; @@ -1074,8 +1077,13 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl) drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n", voltage_swing_level | pre_emphasis_level); - ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET, - buf, lane_cnt); + + if (dp_phy == DP_PHY_DPRX) + reg = DP_TRAINING_LANE0_SET; + else + reg = DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy); + + ret = drm_dp_dpcd_write(ctrl->aux, reg, buf, lane_cnt); if (ret == lane_cnt) ret = 0; @@ -1083,9 +1091,10 @@ static int 
msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl) } static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl, - u8 pattern) + u8 pattern, enum drm_dp_phy dp_phy) { u8 buf; + int reg; int ret = 0; drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern); @@ -1095,31 +1104,26 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl, if (pattern && pattern != DP_TRAINING_PATTERN_4) buf |= DP_LINK_SCRAMBLING_DISABLE; - ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf); - return ret == 1; -} - -static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl, - u8 *link_status) -{ - int ret = 0, len; - - len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); - if (len != DP_LINK_STATUS_SIZE) { - DRM_ERROR("DP link status read failed, err: %d\n", len); - ret = -EINVAL; - } + if (dp_phy == DP_PHY_DPRX) + reg = DP_TRAINING_PATTERN_SET; + else + reg = DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy); - return ret; + ret = drm_dp_dpcd_writeb(ctrl->aux, reg, buf); + return ret == 1; } static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, - int *training_step) + int *training_step, enum drm_dp_phy dp_phy) { + int delay_us; int tries, old_v_level, ret = 0; u8 link_status[DP_LINK_STATUS_SIZE]; int const maximum_retries = 4; + delay_us = drm_dp_read_clock_recovery_delay(ctrl->aux, + ctrl->panel->dpcd, dp_phy, false); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); *training_step = DP_TRAINING_1; @@ -1128,18 +1132,19 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, if (ret) return ret; msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | - DP_LINK_SCRAMBLING_DISABLE); + DP_LINK_SCRAMBLING_DISABLE, dp_phy); - ret = msm_dp_ctrl_update_vx_px(ctrl); + msm_dp_link_reset_phy_params_vx_px(ctrl->link); + ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy); if (ret) return ret; tries = 0; old_v_level = ctrl->link->phy_params.v_level; for (tries = 0; tries < maximum_retries; tries++) { - drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd); + fsleep(delay_us); - ret = msm_dp_ctrl_read_link_status(ctrl, link_status); + ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status); if (ret) return ret; @@ -1160,7 +1165,7 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, } msm_dp_link_adjust_levels(ctrl->link, link_status); - ret = msm_dp_ctrl_update_vx_px(ctrl); + ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy); if (ret) return ret; } @@ -1212,21 +1217,31 @@ static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl) return 0; } -static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl) +static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl, + enum drm_dp_phy dp_phy) { - msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); - drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); + int delay_us; + + msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE, dp_phy); + + delay_us = drm_dp_read_channel_eq_delay(ctrl->aux, + ctrl->panel->dpcd, dp_phy, false); + fsleep(delay_us); } static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, - int *training_step) + int *training_step, enum drm_dp_phy dp_phy) { + int delay_us; int tries = 0, ret = 0; u8 pattern; u32 state_ctrl_bit; int const maximum_retries = 5; u8 link_status[DP_LINK_STATUS_SIZE]; + delay_us = drm_dp_read_channel_eq_delay(ctrl->aux, + ctrl->panel->dpcd, dp_phy, false); + 
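A note on the LTTPR-aware training rework in these hunks: every DPCD training register has a per-repeater mirror, so the same helpers can drive either the sink (DP_PHY_DPRX) or one of the repeaters simply by switching the DPCD offset, and the training delays are read per PHY via drm_dp_read_clock_recovery_delay()/drm_dp_read_channel_eq_delay() before each poll loop. A minimal sketch of the offset selection, using only standard drm_dp_helper macros (the helper name itself is invented for illustration):

#include <drm/display/drm_dp_helper.h>

/* Illustrative only: pick the training-pattern DPCD offset for a given PHY. */
static unsigned int example_train_pattern_reg(enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		return DP_TRAINING_PATTERN_SET;

	/* LTTPRs mirror the training registers at per-repeater offsets */
	return DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

The same pattern covers DP_TRAINING_LANE0_SET vs. DP_TRAINING_LANE0_SET_PHY_REPEATER(), which is exactly the switch msm_dp_ctrl_update_phy_vx_px() gains above.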
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); *training_step = DP_TRAINING_2; @@ -1246,12 +1261,12 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, if (ret) return ret; - msm_dp_ctrl_train_pattern_set(ctrl, pattern); + msm_dp_ctrl_train_pattern_set(ctrl, pattern, dp_phy); for (tries = 0; tries <= maximum_retries; tries++) { - drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); + fsleep(delay_us); - ret = msm_dp_ctrl_read_link_status(ctrl, link_status); + ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status); if (ret) return ret; @@ -1261,7 +1276,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, } msm_dp_link_adjust_levels(ctrl->link, link_status); - ret = msm_dp_ctrl_update_vx_px(ctrl); + ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy); if (ret) return ret; @@ -1270,9 +1285,32 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, return -ETIMEDOUT; } +static int msm_dp_ctrl_link_train_1_2(struct msm_dp_ctrl_private *ctrl, + int *training_step, enum drm_dp_phy dp_phy) +{ + int ret; + + ret = msm_dp_ctrl_link_train_1(ctrl, training_step, dp_phy); + if (ret) { + DRM_ERROR("link training #1 on phy %d failed. ret=%d\n", dp_phy, ret); + return ret; + } + drm_dbg_dp(ctrl->drm_dev, "link training #1 on phy %d successful\n", dp_phy); + + ret = msm_dp_ctrl_link_train_2(ctrl, training_step, dp_phy); + if (ret) { + DRM_ERROR("link training #2 on phy %d failed. ret=%d\n", dp_phy, ret); + return ret; + } + drm_dbg_dp(ctrl->drm_dev, "link training #2 on phy %d successful\n", dp_phy); + + return 0; +} + static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl, int *training_step) { + int i; int ret = 0; const u8 *dpcd = ctrl->panel->dpcd; u8 encoding[] = { 0, DP_SET_ANSI_8B10B }; @@ -1285,8 +1323,6 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl, link_info.rate = ctrl->link->link_params.rate; link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; - msm_dp_link_reset_phy_params_vx_px(ctrl->link); - msm_dp_aux_link_configure(ctrl->aux, &link_info); if (drm_dp_max_downspread(dpcd)) @@ -1301,24 +1337,27 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl, &assr, 1); } - ret = msm_dp_ctrl_link_train_1(ctrl, training_step); + for (i = ctrl->link->lttpr_count - 1; i >= 0; i--) { + enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i); + + ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, dp_phy); + msm_dp_ctrl_clear_training_pattern(ctrl, dp_phy); + + if (ret) + break; + } + if (ret) { - DRM_ERROR("link training #1 failed. ret=%d\n", ret); + DRM_ERROR("link training of LTTPR(s) failed. ret=%d\n", ret); goto end; } - /* print success info as this is a result of user initiated action */ - drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n"); - - ret = msm_dp_ctrl_link_train_2(ctrl, training_step); + ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, DP_PHY_DPRX); if (ret) { - DRM_ERROR("link training #2 failed. ret=%d\n", ret); + DRM_ERROR("link training on sink failed. ret=%d\n", ret); goto end; } - /* print success info as this is a result of user initiated action */ - drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n"); - end: msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); @@ -1366,9 +1405,9 @@ int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "enable core clocks \n"); drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n", - ctrl->stream_clks_on ? "on" : "off", - ctrl->link_clks_on ? 
"on" : "off", - ctrl->core_clks_on ? "on" : "off"); + str_on_off(ctrl->stream_clks_on), + str_on_off(ctrl->link_clks_on), + str_on_off(ctrl->core_clks_on)); return 0; } @@ -1385,9 +1424,9 @@ void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "disable core clocks \n"); drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n", - ctrl->stream_clks_on ? "on" : "off", - ctrl->link_clks_on ? "on" : "off", - ctrl->core_clks_on ? "on" : "off"); + str_on_off(ctrl->stream_clks_on), + str_on_off(ctrl->link_clks_on), + str_on_off(ctrl->core_clks_on)); } static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) @@ -1416,9 +1455,9 @@ static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "enable link clocks\n"); drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n", - ctrl->stream_clks_on ? "on" : "off", - ctrl->link_clks_on ? "on" : "off", - ctrl->core_clks_on ? "on" : "off"); + str_on_off(ctrl->stream_clks_on), + str_on_off(ctrl->link_clks_on), + str_on_off(ctrl->core_clks_on)); return 0; } @@ -1435,9 +1474,9 @@ static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "disabled link clocks\n"); drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n", - ctrl->stream_clks_on ? "on" : "off", - ctrl->link_clks_on ? "on" : "off", - ctrl->core_clks_on ? "on" : "off"); + str_on_off(ctrl->stream_clks_on), + str_on_off(ctrl->link_clks_on), + str_on_off(ctrl->core_clks_on)); } static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl) @@ -1635,7 +1674,7 @@ static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl) if (ret) goto end; - msm_dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX); msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); @@ -1659,7 +1698,7 @@ static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl) return false; } msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); - msm_dp_ctrl_update_vx_px(ctrl); + msm_dp_ctrl_update_phy_vx_px(ctrl, DP_PHY_DPRX); msm_dp_link_send_test_response(ctrl->link); pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); @@ -1804,7 +1843,7 @@ static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl) u8 link_status[DP_LINK_STATUS_SIZE]; int num_lanes = ctrl->link->link_params.num_lanes; - msm_dp_ctrl_read_link_status(ctrl, link_status); + drm_dp_dpcd_read_link_status(ctrl->aux, link_status); return drm_dp_channel_eq_ok(link_status, num_lanes); } @@ -1862,7 +1901,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - msm_dp_ctrl_read_link_status(ctrl, link_status); + drm_dp_dpcd_read_link_status(ctrl->aux, link_status); rc = msm_dp_ctrl_link_rate_down_shift(ctrl); if (rc < 0) { /* already in RBR = 1.6G */ @@ -1887,7 +1926,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - msm_dp_ctrl_read_link_status(ctrl, link_status); + drm_dp_dpcd_read_link_status(ctrl->aux, link_status); if (!drm_dp_clock_recovery_ok(link_status, ctrl->link->link_params.num_lanes)) @@ -1901,7 +1940,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) } /* stop link training before start re training */ - msm_dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl, 
DP_PHY_DPRX); } rc = msm_dp_ctrl_reinitialize_mainlink(ctrl); @@ -1925,7 +1964,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) * link training failed * end txing train pattern here */ - msm_dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX); msm_dp_ctrl_deinitialize_mainlink(ctrl); rc = -ECONNRESET; @@ -1996,7 +2035,7 @@ int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train msm_dp_ctrl_link_retrain(ctrl); /* stop txing train pattern to end link training */ - msm_dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX); /* * Set up transfer unit values and set controller state to send diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 24dd37f1682b..a48e6db4f156 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -11,7 +11,9 @@ #include <linux/of_irq.h> #include <linux/phy/phy.h> #include <linux/delay.h> +#include <linux/string_choices.h> #include <drm/display/drm_dp_aux_bus.h> +#include <drm/display/drm_hdmi_audio_helper.h> #include <drm/drm_edid.h> #include "msm_drv.h" @@ -126,6 +128,11 @@ static const struct msm_dp_desc msm_dp_desc_sa8775p[] = { {} }; +static const struct msm_dp_desc msm_dp_desc_sdm845[] = { + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 }, + {} +}; + static const struct msm_dp_desc msm_dp_desc_sc7180[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} @@ -178,7 +185,7 @@ static const struct of_device_id msm_dp_dt_match[] = { { .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x }, { .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp }, { .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp }, - { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sdm845 }, { .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 }, { .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 }, { .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 }, @@ -287,13 +294,6 @@ static int msm_dp_display_bind(struct device *dev, struct device *master, goto end; } - - rc = msm_dp_register_audio_driver(dev, dp->audio); - if (rc) { - DRM_ERROR("Audio registration Dp failed\n"); - goto end; - } - rc = msm_dp_hpd_event_thread_start(dp); if (rc) { DRM_ERROR("Event thread create failed\n"); @@ -315,7 +315,6 @@ static void msm_dp_display_unbind(struct device *dev, struct device *master, of_dp_aux_depopulate_bus(dp->aux); - msm_dp_unregister_audio_driver(dev, dp->audio); msm_dp_aux_unregister(dp->aux); dp->drm_dev = NULL; dp->aux->drm_dev = NULL; @@ -343,8 +342,7 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d { if ((hpd && dp->msm_dp_display.link_ready) || (!hpd && !dp->msm_dp_display.link_ready)) { - drm_dbg_dp(dp->drm_dev, "HPD already %s\n", - (hpd ? 
"on" : "off")); + drm_dbg_dp(dp->drm_dev, "HPD already %s\n", str_on_off(hpd)); return 0; } @@ -367,11 +365,35 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d return 0; } +static int msm_dp_display_lttpr_init(struct msm_dp_display_private *dp, u8 *dpcd) +{ + int rc, lttpr_count; + + if (drm_dp_read_lttpr_common_caps(dp->aux, dpcd, dp->link->lttpr_common_caps)) + return 0; + + lttpr_count = drm_dp_lttpr_count(dp->link->lttpr_common_caps); + rc = drm_dp_lttpr_init(dp->aux, lttpr_count); + if (rc) { + DRM_ERROR("failed to set LTTPRs transparency mode, rc=%d\n", rc); + return 0; + } + + return lttpr_count; +} + static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp) { struct drm_connector *connector = dp->msm_dp_display.connector; const struct drm_display_info *info = &connector->display_info; int rc = 0; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + + rc = drm_dp_read_dpcd_caps(dp->aux, dpcd); + if (rc) + goto end; + + dp->link->lttpr_count = msm_dp_display_lttpr_init(dp, dpcd); rc = msm_dp_panel_read_sink_caps(dp->panel, connector); if (rc) @@ -611,9 +633,9 @@ static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display, struct msm_dp_display_private, msm_dp_display); /* notify audio subsystem only if sink supports audio */ - if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev && - dp->audio_supported) - msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged); + if (dp->audio_supported) + drm_connector_hdmi_audio_plugged_notify(msm_dp_display->connector, + plugged); } static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data) @@ -892,19 +914,6 @@ static int msm_dp_display_disable(struct msm_dp_display_private *dp) return 0; } -int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display, - hdmi_codec_plugged_cb fn, struct device *codec_dev) -{ - bool plugged; - - msm_dp_display->plugged_cb = fn; - msm_dp_display->codec_dev = codec_dev; - plugged = msm_dp_display->link_ready; - msm_dp_display_handle_plugged_change(msm_dp_display, plugged); - - return 0; -} - /** * msm_dp_bridge_mode_valid - callback to determine if specified mode is valid * @bridge: Pointer to drm bridge structure @@ -930,16 +939,17 @@ enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, return -EINVAL; } - if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) - return MODE_CLOCK_HIGH; - msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); link_info = &msm_dp_display->panel->link_info; - if (drm_mode_is_420_only(&dp->connector->display_info, mode) && - msm_dp_display->panel->vsc_sdp_supported) + if ((drm_mode_is_420_only(&dp->connector->display_info, mode) && + msm_dp_display->panel->vsc_sdp_supported) || + msm_dp_wide_bus_available(dp)) mode_pclk_khz /= 2; + if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) + return MODE_CLOCK_HIGH; + mode_bpp = dp->connector->display_info.bpc * num_components; if (!mode_bpp) mode_bpp = default_bpp; @@ -1491,13 +1501,13 @@ int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev, } void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state) + struct drm_atomic_state *state) { struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); struct msm_dp *dp = msm_dp_bridge->msm_dp_display; int rc = 0; struct msm_dp_display_private *msm_dp_display; - u32 state; + u32 hpd_state; bool force_link_train = false; msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); @@ 
-1516,8 +1526,8 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, return; } - state = msm_dp_display->hpd_state; - if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) { + hpd_state = msm_dp_display->hpd_state; + if (hpd_state != ST_DISPLAY_OFF && hpd_state != ST_MAINLINK_READY) { mutex_unlock(&msm_dp_display->event_mutex); return; } @@ -1529,9 +1539,9 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, return; } - state = msm_dp_display->hpd_state; + hpd_state = msm_dp_display->hpd_state; - if (state == ST_DISPLAY_OFF) { + if (hpd_state == ST_DISPLAY_OFF) { msm_dp_display_host_phy_init(msm_dp_display); force_link_train = true; } @@ -1552,7 +1562,7 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, } void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state) + struct drm_atomic_state *state) { struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); struct msm_dp *dp = msm_dp_bridge->msm_dp_display; @@ -1564,11 +1574,11 @@ void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, } void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state) + struct drm_atomic_state *state) { struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); struct msm_dp *dp = msm_dp_bridge->msm_dp_display; - u32 state; + u32 hpd_state; struct msm_dp_display_private *msm_dp_display; msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); @@ -1578,15 +1588,15 @@ void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, mutex_lock(&msm_dp_display->event_mutex); - state = msm_dp_display->hpd_state; - if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) + hpd_state = msm_dp_display->hpd_state; + if (hpd_state != ST_DISCONNECT_PENDING && hpd_state != ST_CONNECTED) drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n", - dp->connector_type, state); + dp->connector_type, hpd_state); msm_dp_display_disable(msm_dp_display); - state = msm_dp_display->hpd_state; - if (state == ST_DISCONNECT_PENDING) { + hpd_state = msm_dp_display->hpd_state; + if (hpd_state == ST_DISCONNECT_PENDING) { /* completed disconnection */ msm_dp_display->hpd_state = ST_DISCONNECTED; } else { diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index ecbc2d92f546..cc6e2cab36e9 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -7,7 +7,6 @@ #define _DP_DISPLAY_H_ #include "dp_panel.h" -#include <sound/hdmi-codec.h> #include "disp/msm_disp_snapshot.h" #define DP_MAX_PIXEL_CLK_KHZ 675000 @@ -15,7 +14,6 @@ struct msm_dp { struct drm_device *drm_dev; struct platform_device *pdev; - struct device *codec_dev; struct drm_connector *connector; struct drm_bridge *next_bridge; bool link_ready; @@ -25,14 +23,10 @@ struct msm_dp { bool is_edp; bool internal_hpd; - hdmi_codec_plugged_cb plugged_cb; - struct msm_dp_audio *msm_dp_audio; bool psr_supported; }; -int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display, - hdmi_codec_plugged_cb fn, struct device *codec_dev); int msm_dp_display_get_modes(struct msm_dp *msm_dp_display); bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display); int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display); diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index d3e241ea6941..f222d7ccaa88 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -3,6 
+3,7 @@ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ +#include <linux/string_choices.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_bridge.h> @@ -11,6 +12,7 @@ #include "msm_drv.h" #include "msm_kms.h" +#include "dp_audio.h" #include "dp_drm.h" /** @@ -25,7 +27,7 @@ static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge) dp = to_dp_bridge(bridge)->msm_dp_display; drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", - (dp->link_ready) ? "true" : "false"); + str_true_false(dp->link_ready)); return (dp->link_ready) ? connector_status_connected : connector_status_disconnected; @@ -41,7 +43,7 @@ static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge, dp = to_dp_bridge(bridge)->msm_dp_display; drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", - (dp->link_ready) ? "true" : "false"); + str_true_false(dp->link_ready)); /* * There is no protection in the DRM framework to check if the display @@ -113,6 +115,9 @@ static const struct drm_bridge_funcs msm_dp_bridge_ops = { .hpd_disable = msm_dp_bridge_hpd_disable, .hpd_notify = msm_dp_bridge_hpd_notify, .debugfs_init = msm_dp_bridge_debugfs_init, + + .dp_audio_prepare = msm_dp_audio_prepare, + .dp_audio_shutdown = msm_dp_audio_shutdown, }; static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge, @@ -137,9 +142,8 @@ static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge, } static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state) + struct drm_atomic_state *state) { - struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); @@ -151,25 +155,24 @@ static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, * If the panel is in psr, just exit psr state and skip the full * bridge enable sequence. 
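The eDP PSR handling here also illustrates the new drm_bridge callback signature: the full drm_atomic_state is passed in directly, so CRTC states are looked up from it rather than through old_bridge_state->base.state. A condensed sketch of that lookup under the new signature (the bridge and function names are illustrative; the helpers are the stock DRM ones used in this file):

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;

	/* CRTC driving this bridge's encoder in the committed state */
	crtc = drm_atomic_get_new_crtc_for_encoder(state, bridge->encoder);
	if (!crtc)
		return;

	/* Previous CRTC state, e.g. to detect a self-refresh (PSR) exit */
	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
	if (old_crtc_state && old_crtc_state->self_refresh_active) {
		/* leave PSR; skip the full enable sequence */
		return;
	}

	/* ... regular enable sequence ... */
}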
*/ - crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state, + crtc = drm_atomic_get_new_crtc_for_encoder(state, drm_bridge->encoder); if (!crtc) return; - old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); + old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); if (old_crtc_state && old_crtc_state->self_refresh_active) { msm_dp_display_set_psr(dp, false); return; } - msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_enable(drm_bridge, state); } static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state) + struct drm_atomic_state *atomic_state) { - struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL; struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); @@ -208,13 +211,12 @@ static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, } out: - msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_disable(drm_bridge, atomic_state); } static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state) + struct drm_atomic_state *atomic_state) { - struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state = NULL; @@ -233,7 +235,7 @@ static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, if (new_crtc_state->self_refresh_active) return; - msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_post_disable(drm_bridge, atomic_state); } /** @@ -257,7 +259,10 @@ static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge, return -EINVAL; } - if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) + if (msm_dp_wide_bus_available(dp)) + mode_pclk_khz /= 2; + + if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) return MODE_CLOCK_HIGH; /* @@ -295,14 +300,15 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, struct msm_dp_bridge *msm_dp_bridge; struct drm_bridge *bridge; - msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL); - if (!msm_dp_bridge) - return -ENOMEM; + msm_dp_bridge = devm_drm_bridge_alloc(dev->dev, struct msm_dp_bridge, bridge, + msm_dp_display->is_edp ? &msm_edp_bridge_ops : + &msm_dp_bridge_ops); + if (IS_ERR(msm_dp_bridge)) + return PTR_ERR(msm_dp_bridge); msm_dp_bridge->msm_dp_display = msm_dp_display; bridge = &msm_dp_bridge->bridge; - bridge->funcs = msm_dp_display->is_edp ? 
&msm_edp_bridge_ops : &msm_dp_bridge_ops; bridge->type = msm_dp_display->connector_type; bridge->ycbcr_420_allowed = yuv_supported; @@ -319,9 +325,13 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, */ if (!msm_dp_display->is_edp) { bridge->ops = + DRM_BRIDGE_OP_DP_AUDIO | DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES; + bridge->hdmi_audio_dev = &msm_dp_display->pdev->dev; + bridge->hdmi_audio_max_i2s_playback_channels = 8; + bridge->hdmi_audio_dai_port = -1; } rc = devm_drm_bridge_add(dev->dev, bridge); diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h index 8eae2f74839f..d8c9b905f8bf 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.h +++ b/drivers/gpu/drm/msm/dp/dp_drm.h @@ -26,11 +26,11 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, bool yuv_supported); void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state); + struct drm_atomic_state *state); void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state); + struct drm_atomic_state *state); void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, - struct drm_bridge_state *old_bridge_state); + struct drm_atomic_state *state); enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode); diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 1a1fbb2d7d4f..92a9077959b3 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -714,21 +714,21 @@ end: static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link) { - int len; + int ret; link->prev_sink_count = link->msm_dp_link.sink_count; - len = drm_dp_read_sink_count(link->aux); - if (len < 0) { + ret = drm_dp_read_sink_count(link->aux); + if (ret < 0) { DRM_ERROR("DP parse sink count failed\n"); - return len; + return ret; } - link->msm_dp_link.sink_count = len; + link->msm_dp_link.sink_count = ret; - len = drm_dp_dpcd_read_link_status(link->aux, - link->link_status); - if (len < DP_LINK_STATUS_SIZE) { + ret = drm_dp_dpcd_read_link_status(link->aux, + link->link_status); + if (ret < 0) { DRM_ERROR("DP link status read failed\n"); - return len; + return ret; } return msm_dp_link_parse_request(link); diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h index 8db5d5698a97..ba47c6d19fbf 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.h +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -7,6 +7,7 @@ #define _DP_LINK_H_ #include "dp_aux.h" +#include <drm/display/drm_dp_helper.h> #define DS_PORT_STATUS_CHANGED 0x200 #define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF @@ -60,6 +61,9 @@ struct msm_dp_link_phy_params { }; struct msm_dp_link { + u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE]; + int lttpr_count; + u32 sink_request; u32 test_response; diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 92415bf8aa16..4e8ab75c771b 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -47,7 +47,7 @@ static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel) static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel) { - int rc; + int rc, max_lttpr_lanes, max_lttpr_rate; struct msm_dp_panel_private *panel; struct msm_dp_link_info *link_info; u8 *dpcd, major, minor; @@ -75,6 +75,16 @@ static int 
msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel) if (link_info->rate > msm_dp_panel->max_dp_link_rate) link_info->rate = msm_dp_panel->max_dp_link_rate; + /* Limit data lanes from LTTPR capabilities, if any */ + max_lttpr_lanes = drm_dp_lttpr_max_lane_count(panel->link->lttpr_common_caps); + if (max_lttpr_lanes && max_lttpr_lanes < link_info->num_lanes) + link_info->num_lanes = max_lttpr_lanes; + + /* Limit link rate from LTTPR capabilities, if any */ + max_lttpr_rate = drm_dp_lttpr_max_link_rate(panel->link->lttpr_common_caps); + if (max_lttpr_rate && max_lttpr_rate < link_info->rate) + link_info->rate = max_lttpr_rate; + drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor); drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate); drm_dbg_dp(panel->drm_dev, "lane_count=%d\n", link_info->num_lanes); diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 7754dcec33d0..7675558ae2e5 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -221,6 +221,22 @@ static const struct msm_dsi_config sc7280_dsi_cfg = { }, }; +static const struct regulator_bulk_data sa8775p_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 8300 }, /* 1.2 V */ + { .supply = "refgen" }, +}; + +static const struct msm_dsi_config sa8775p_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sa8775p_dsi_regulators, + .num_regulators = ARRAY_SIZE(sa8775p_dsi_regulators), + .bus_clk_names = dsi_v2_4_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names), + .io_start = { + { 0xae94000, 0xae96000 }, + }, +}; + static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { .link_clk_set_rate = dsi_link_clk_set_rate_v2, .link_clk_enable = dsi_link_clk_enable_v2, @@ -294,6 +310,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0, &sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_1, + &sa8775p_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_6_0, &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0, diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index 120cb65164c1..65b0705fac0e 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -27,6 +27,7 @@ #define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000 #define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 #define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000 +#define MSM_DSI_6G_VER_MINOR_V2_5_1 0x20050001 #define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000 #define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000 #define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000 diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 007311c21fda..4d75529c0e85 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -179,18 +179,18 @@ struct msm_dsi_host { int irq; }; - static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) { return readl(msm_host->ctrl_base + reg); } + static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) { writel(data, msm_host->ctrl_base + reg); } -static const struct msm_dsi_cfg_handler *dsi_get_config( - struct msm_dsi_host *msm_host) +static const struct msm_dsi_cfg_handler * +dsi_get_config(struct msm_dsi_host *msm_host) { const struct msm_dsi_cfg_handler *cfg_hnd = NULL; struct device *dev = 
&msm_host->pdev->dev; @@ -200,7 +200,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( ahb_clk = msm_clk_get(msm_host->pdev, "iface"); if (IS_ERR(ahb_clk)) { - pr_err("%s: cannot get interface clock\n", __func__); + dev_err_probe(dev, PTR_ERR(ahb_clk), "%s: cannot get interface clock\n", + __func__); goto exit; } @@ -208,13 +209,13 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( ret = clk_prepare_enable(ahb_clk); if (ret) { - pr_err("%s: unable to enable ahb_clk\n", __func__); + dev_err_probe(dev, ret, "%s: unable to enable ahb_clk\n", __func__); goto runtime_put; } ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); if (ret) { - pr_err("%s: Invalid version\n", __func__); + dev_err_probe(dev, ret, "%s: Invalid version\n", __func__); goto disable_clks; } @@ -281,42 +282,31 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) msm_host->num_bus_clks = cfg->num_bus_clks; ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks); - if (ret < 0) { - dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret); - goto exit; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Unable to get clocks\n"); /* get link and source clocks */ msm_host->byte_clk = msm_clk_get(pdev, "byte"); - if (IS_ERR(msm_host->byte_clk)) { - ret = PTR_ERR(msm_host->byte_clk); - pr_err("%s: can't find dsi_byte clock. ret=%d\n", - __func__, ret); - msm_host->byte_clk = NULL; - goto exit; - } + if (IS_ERR(msm_host->byte_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->byte_clk), + "%s: can't find dsi_byte clock\n", + __func__); msm_host->pixel_clk = msm_clk_get(pdev, "pixel"); - if (IS_ERR(msm_host->pixel_clk)) { - ret = PTR_ERR(msm_host->pixel_clk); - pr_err("%s: can't find dsi_pixel clock. ret=%d\n", - __func__, ret); - msm_host->pixel_clk = NULL; - goto exit; - } + if (IS_ERR(msm_host->pixel_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->pixel_clk), + "%s: can't find dsi_pixel clock\n", + __func__); msm_host->esc_clk = msm_clk_get(pdev, "core"); - if (IS_ERR(msm_host->esc_clk)) { - ret = PTR_ERR(msm_host->esc_clk); - pr_err("%s: can't find dsi_esc clock. 
ret=%d\n", - __func__, ret); - msm_host->esc_clk = NULL; - goto exit; - } + if (IS_ERR(msm_host->esc_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->esc_clk), + "%s: can't find dsi_esc clock\n", + __func__); if (cfg_hnd->ops->clk_init_ver) ret = cfg_hnd->ops->clk_init_ver(msm_host); -exit: + return ret; } @@ -380,7 +370,6 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) return 0; } - int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) { int ret; @@ -598,7 +587,6 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi) DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate, msm_host->byte_clk_rate); - } int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi) @@ -687,8 +675,8 @@ static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags) return NON_BURST_SYNCH_EVENT; } -static inline enum dsi_vid_dst_format dsi_get_vid_fmt( - const enum mipi_dsi_pixel_format mipi_fmt) +static inline enum dsi_vid_dst_format +dsi_get_vid_fmt(const enum mipi_dsi_pixel_format mipi_fmt) { switch (mipi_fmt) { case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888; @@ -699,8 +687,8 @@ static inline enum dsi_vid_dst_format dsi_get_vid_fmt( } } -static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( - const enum mipi_dsi_pixel_format mipi_fmt) +static inline enum dsi_cmd_dst_format +dsi_get_cmd_fmt(const enum mipi_dsi_pixel_format mipi_fmt) { switch (mipi_fmt) { case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; @@ -846,7 +834,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0)); } -static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay) +static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode) { struct drm_dsc_config *dsc = msm_host->dsc; u32 reg, reg_ctrl, reg_ctrl2; @@ -858,7 +846,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod /* first calculate dsc parameters and then program * compress mode registers */ - slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay); + slice_per_intf = dsc->slice_count; total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf; bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */ @@ -991,7 +979,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) { if (msm_host->dsc) - dsi_update_dsc_timing(msm_host, false, mode->hdisplay); + dsi_update_dsc_timing(msm_host, false); dsi_write(msm_host, REG_DSI_ACTIVE_H, DSI_ACTIVE_H_START(ha_start) | @@ -1012,7 +1000,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) DSI_ACTIVE_VSYNC_VPOS_END(vs_end)); } else { /* command mode */ if (msm_host->dsc) - dsi_update_dsc_timing(msm_host, true, mode->hdisplay); + dsi_update_dsc_timing(msm_host, true); /* image data and 1 byte write_memory_start cmd */ if (!msm_host->dsc) @@ -1292,14 +1280,15 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host, static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg) { u8 *data = msg->rx_buf; + if (data && (msg->rx_len >= 1)) { *data = buf[1]; /* strip out dcs type */ return 1; - } else { - pr_err("%s: read data does not match with rx_buf len %zu\n", - __func__, msg->rx_len); - return -EINVAL; } + + pr_err("%s: read data does not match with rx_buf len %zu\n", + __func__, msg->rx_len); + return -EINVAL; } /* @@ -1308,15 +1297,16 @@ static int dsi_short_read1_resp(u8 
*buf, const struct mipi_dsi_msg *msg) static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg) { u8 *data = msg->rx_buf; + if (data && (msg->rx_len >= 2)) { data[0] = buf[1]; /* strip out dcs type */ data[1] = buf[2]; return 2; - } else { - pr_err("%s: read data does not match with rx_buf len %zu\n", - __func__, msg->rx_len); - return -EINVAL; } + + pr_err("%s: read data does not match with rx_buf len %zu\n", + __func__, msg->rx_len); + return -EINVAL; } static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg) @@ -1376,8 +1366,9 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) ret = -ETIMEDOUT; else ret = len; - } else + } else { ret = len; + } return ret; } @@ -1445,11 +1436,12 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host, return len; } - /* for video mode, do not send cmds more than - * one pixel line, since it only transmit it - * during BLLP. - */ - /* TODO: if the command is sent in LP mode, the bit rate is only + /* + * for video mode, do not send cmds more than + * one pixel line, since it only transmit it + * during BLLP. + * + * TODO: if the command is sent in LP mode, the bit rate is only * half of esc clk rate. In this case, if the video is already * actively streaming, we need to check more carefully if the * command can be fit into one BLLP. @@ -1767,8 +1759,20 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc return -EINVAL; } - if (dsc->bits_per_component != 8) { - DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n"); + switch (dsc->bits_per_component) { + case 8: + case 10: + case 12: + /* + * Only 8, 10, and 12 bpc are supported for DSC 1.1 block. + * If additional bpc values need to be supported, update + * this guard with the appropriate DSC version verification. 
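For the DSC validation here: once bits_per_component passes the check, the rate-control tables come from the shared DRM DSC helpers rather than per-bpc driver tables. A rough sketch of that call sequence, loosely mirroring dsi_populate_dsc_params(); the wrapper function and its device argument are invented for illustration:

#include <linux/device.h>
#include <drm/display/drm_dsc_helper.h>

static int example_setup_dsc_rc(struct device *dev, struct drm_dsc_config *dsc)
{
	int ret;

	/* Fixed DSC 1.x fields and the standard RC buffer thresholds */
	drm_dsc_set_const_params(dsc);
	drm_dsc_set_rc_buf_thresh(dsc);

	/* Pre-SCR rate-control table matching the configured bpp/bpc */
	ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
	if (ret) {
		dev_err(dev, "could not find DSC RC parameters\n");
		return ret;
	}

	/* Derive the remaining rate-control values from the table */
	return drm_dsc_compute_rc_parameters(dsc);
}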
+ */ + break; + default: + DRM_DEV_ERROR(&msm_host->pdev->dev, + "Unsupported bits_per_component value: %d\n", + dsc->bits_per_component); return -EOPNOTSUPP; } @@ -1779,7 +1783,7 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc drm_dsc_set_const_params(dsc); drm_dsc_set_rc_buf_thresh(dsc); - /* handle only bpp = bpc = 8, pre-SCR panels */ + /* DPU supports only pre-SCR panels */ ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR); if (ret) { DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n"); @@ -1827,8 +1831,15 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) __func__, ret); goto err; } - if (!ret) + if (!ret) { msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL); + if (!msm_dsi->te_source) { + DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n", + __func__); + ret = -ENOMEM; + goto err; + } + } ret = 0; if (of_property_present(np, "syscon-sfpb")) { @@ -1874,39 +1885,35 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) int ret; msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); - if (!msm_host) { + if (!msm_host) return -ENOMEM; - } msm_host->pdev = pdev; msm_dsi->host = &msm_host->base; ret = dsi_host_parse_dt(msm_host); - if (ret) { - pr_err("%s: failed to parse dt\n", __func__); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "%s: failed to parse dt\n", + __func__); msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size); - if (IS_ERR(msm_host->ctrl_base)) { - pr_err("%s: unable to map Dsi ctrl base\n", __func__); - return PTR_ERR(msm_host->ctrl_base); - } + if (IS_ERR(msm_host->ctrl_base)) + return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->ctrl_base), + "%s: unable to map Dsi ctrl base\n", __func__); pm_runtime_enable(&pdev->dev); msm_host->cfg_hnd = dsi_get_config(msm_host); - if (!msm_host->cfg_hnd) { - pr_err("%s: get config failed\n", __func__); - return -EINVAL; - } + if (!msm_host->cfg_hnd) + return dev_err_probe(&pdev->dev, -EINVAL, + "%s: get config failed\n", __func__); cfg = msm_host->cfg_hnd->cfg; msm_host->id = dsi_host_get_id(msm_host); - if (msm_host->id < 0) { - pr_err("%s: unable to identify DSI host index\n", __func__); - return msm_host->id; - } + if (msm_host->id < 0) + return dev_err_probe(&pdev->dev, msm_host->id, + "%s: unable to identify DSI host index\n", + __func__); /* fixup base address by io offset */ msm_host->ctrl_base += cfg->io_offset; @@ -1918,42 +1925,32 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) return ret; ret = dsi_clk_init(msm_host); - if (ret) { - pr_err("%s: unable to initialize dsi clks\n", __func__); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "%s: unable to initialize dsi clks\n", __func__); msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL); - if (!msm_host->rx_buf) { - pr_err("%s: alloc rx temp buf failed\n", __func__); + if (!msm_host->rx_buf) return -ENOMEM; - } ret = devm_pm_opp_set_clkname(&pdev->dev, "byte"); if (ret) return ret; /* OPP table is optional */ ret = devm_pm_opp_of_add_table(&pdev->dev); - if (ret && ret != -ENODEV) { - dev_err(&pdev->dev, "invalid OPP table in device tree\n"); - return ret; - } + if (ret && ret != -ENODEV) + return dev_err_probe(&pdev->dev, ret, "invalid OPP table in device tree\n"); msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - if (!msm_host->irq) { - dev_err(&pdev->dev, "failed to get irq\n"); - return -EINVAL; - } + if (!msm_host->irq) + return dev_err_probe(&pdev->dev, -EINVAL, "failed to 
get irq\n"); /* do not autoenable, will be enabled later */ ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, "dsi_isr", msm_host); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", - msm_host->irq, ret); - return ret; - } + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "failed to request IRQ%u\n", + msm_host->irq); init_completion(&msm_host->dma_comp); init_completion(&msm_host->video_comp); diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index a210b7c9e5ca..ca400924d4ee 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id) int ret; if (!IS_BONDED_DSI()) { + /* + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + ret = msm_dsi_host_register(msm_dsi->host); if (ret) return ret; - - msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); } else if (other_dsi) { struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? msm_dsi : other_dsi; struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? other_dsi : msm_dsi; + + /* + * PLL0 is to drive both DSI link clocks in bonded DSI mode. + * + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(clk_master_dsi->phy, + MSM_DSI_PHY_MASTER); + msm_dsi_phy_set_usecase(clk_slave_dsi->phy, + MSM_DSI_PHY_SLAVE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); + /* Register slave host first, so that slave DSI device * has a chance to probe, and do not block the master * DSI device's probe. @@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id) ret = msm_dsi_host_register(master_link_dsi->host); if (ret) return ret; - - /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. 
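The msm_dsi_host_init() cleanups above lean on dev_err_probe(), which folds the error message and the return value into one statement and stays quiet for -EPROBE_DEFER while still recording the deferral reason for debugfs. A minimal sketch of the idiom, with an invented clock consumer name purely for illustration:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_iface_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;

	clk = devm_clk_get(dev, "iface");	/* consumer name is illustrative */
	if (IS_ERR(clk))
		/* logs (or silently defers) and returns the error in one go */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "cannot get interface clock\n");

	*out = clk;
	return 0;
}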
*/ - msm_dsi_phy_set_usecase(clk_master_dsi->phy, - MSM_DSI_PHY_MASTER); - msm_dsi_phy_set_usecase(clk_slave_dsi->phy, - MSM_DSI_PHY_SLAVE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); - msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); } return 0; @@ -424,12 +434,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge, } static int dsi_mgr_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { int id = dsi_mgr_bridge_get_id(bridge); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge, + return drm_bridge_attach(encoder, msm_dsi->next_bridge, bridge, flags); } @@ -451,15 +462,14 @@ int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi, struct drm_connector *connector; int ret; - dsi_bridge = devm_kzalloc(msm_dsi->dev->dev, - sizeof(*dsi_bridge), GFP_KERNEL); - if (!dsi_bridge) - return -ENOMEM; + dsi_bridge = devm_drm_bridge_alloc(msm_dsi->dev->dev, struct dsi_bridge, base, + &dsi_mgr_bridge_funcs); + if (IS_ERR(dsi_bridge)) + return PTR_ERR(dsi_bridge); dsi_bridge->id = msm_dsi->id; bridge = &dsi_bridge->base; - bridge->funcs = &dsi_mgr_bridge_funcs; ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge); if (ret) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index c0bcc6828963..5973d7325699 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -581,6 +581,10 @@ static const struct of_device_id dsi_phy_dt_match[] = { .data = &dsi_phy_7nm_cfgs }, { .compatible = "qcom,dsi-phy-7nm-8150", .data = &dsi_phy_7nm_8150_cfgs }, + { .compatible = "qcom,sa8775p-dsi-phy-5nm", + .data = &dsi_phy_5nm_8775p_cfgs }, + { .compatible = "qcom,sar2130p-dsi-phy-5nm", + .data = &dsi_phy_5nm_sar2130p_cfgs }, { .compatible = "qcom,sc7280-dsi-phy-7nm", .data = &dsi_phy_7nm_7280_cfgs }, { .compatible = "qcom,sm6375-dsi-phy-7nm", diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index 8985818bb2e0..7ea608f620fe 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -6,6 +6,7 @@ #ifndef __DSI_PHY_H__ #define __DSI_PHY_H__ +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/regulator/consumer.h> @@ -58,6 +59,8 @@ extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs; @@ -84,9 +87,7 @@ struct msm_dsi_dphy_timing { u8 hs_halfbyte_en_ckln; }; -#define DSI_BYTE_PLL_CLK 0 -#define DSI_PIXEL_PLL_CLK 1 -#define NUM_PROVIDED_CLKS 2 +#define NUM_PROVIDED_CLKS (DSI_PIXEL_PLL_CLK + 1) #define DSI_LANE_MAX 5 diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index 677c62571811..af2e30f3f842 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -3,6 +3,7 @@ * Copyright (c) 2018, The Linux Foundation */ +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> #include <linux/clk.h> #include 
<linux/clk-provider.h> #include <linux/iopoll.h> @@ -703,6 +704,13 @@ static int dsi_pll_10nm_init(struct msm_dsi_phy *phy) /* TODO: Remove this when we have proper display handover support */ msm_dsi_phy_pll_save_state(phy); + /* + * Store also proper vco_current_rate, because its value will be used in + * dsi_10nm_pll_restore_state(). + */ + if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE)) + pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate; + return 0; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c index 2c3cbe0f2870..3a1c8ece6657 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -3,6 +3,7 @@ * Copyright (c) 2016, The Linux Foundation. All rights reserved. */ +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index 1383e3a4e050..90348a2af3e9 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -3,6 +3,7 @@ * Copyright (c) 2015, The Linux Foundation. All rights reserved. */ +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> #include <linux/clk.h> #include <linux/clk-provider.h> diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 5311ab7f3c70..f3643320ff2f 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -3,6 +3,7 @@ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. */ +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> #include <linux/clk-provider.h> #include <linux/delay.h> diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 031446c87dae..c19890358b74 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -3,6 +3,8 @@ * Copyright (c) 2018, The Linux Foundation */ +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/iopoll.h> @@ -83,6 +85,9 @@ struct dsi_pll_7nm { /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */ spinlock_t postdiv_lock; + /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */ + spinlock_t pclk_mux_lock; + struct pll_7nm_cached_state cached_state; struct dsi_pll_7nm *slave; @@ -302,7 +307,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi writel(pll->phy->cphy_mode ? 
0x00 : 0x10, base + REG_DSI_7nm_PHY_PLL_CMODE_1); writel(config->pll_clock_inverters, - base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS); + base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1); } static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, @@ -372,22 +377,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll) ndelay(250); } -static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll) +static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pll->postdiv_lock, flags); + writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); + spin_unlock_irqrestore(&pll->postdiv_lock, flags); +} + +static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask, + u32 val) { + unsigned long flags; u32 data; + spin_lock_irqsave(&pll->pclk_mux_lock, flags); data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + data &= ~mask; + data |= val & mask; + + writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + spin_unlock_irqrestore(&pll->pclk_mux_lock, flags); +} + +static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll) +{ + dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0); } static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll) { - u32 data; + u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL; writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3); - - data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1); } static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll) @@ -550,11 +574,11 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy) cached->pll_out_div &= 0x3; cmn_clk_cfg0 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); - cached->bit_clk_div = cmn_clk_cfg0 & 0xf; - cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4; + cached->bit_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK, cmn_clk_cfg0); + cached->pix_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK, cmn_clk_cfg0); cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - cached->pll_mux = cmn_clk_cfg1 & 0x3; + cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1); DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x", pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div, @@ -565,7 +589,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) { struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; - void __iomem *phy_base = pll_7nm->phy->base; u32 val; int ret; @@ -574,13 +597,11 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) val |= cached->pll_out_div; writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); - writel(cached->bit_clk_div | (cached->pix_clk_div << 4), - phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); - - val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - val &= ~0x3; - val |= cached->pll_mux; - writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_pll_cmn_clk_cfg0_write(pll_7nm, + DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) | + DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div)); + dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, + cached->pll_mux); ret = 
dsi_pll_7nm_vco_set_rate(phy->vco_hw, pll_7nm->vco_current_rate, @@ -599,7 +620,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy) { struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); - void __iomem *base = phy->base; u32 data = 0x0; /* internal PLL */ DBG("DSI PLL%d", pll_7nm->phy->id); @@ -618,7 +638,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy) } /* set PLL src */ - writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK, + DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data)); return 0; } @@ -718,11 +739,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide * don't register a pclk_mux clock and just use post_out_div instead */ if (pll_7nm->phy->cphy_mode) { - u32 data; - - data = readl(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - writel(data | 3, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - + dsi_pll_cmn_clk_cfg1_update(pll_7nm, + DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, + DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL(3)); phy_pll_out_dsi_parent = pll_post_out_div; } else { snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id); @@ -733,7 +752,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide pll_by_2_bit, }), 2, 0, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, - 0, 1, 0, NULL); + 0, 1, 0, &pll_7nm->pclk_mux_lock); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; @@ -778,6 +797,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy) pll_7nm_list[phy->id] = pll_7nm; spin_lock_init(&pll_7nm->postdiv_lock); + spin_lock_init(&pll_7nm->pclk_mux_lock); pll_7nm->phy = phy; @@ -1127,6 +1147,10 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = { { .supply = "vdds", .init_load_uA = 37550 }, }; +static const struct regulator_bulk_data dsi_phy_7nm_48000uA_regulators[] = { + { .supply = "vdds", .init_load_uA = 48000 }, +}; + static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = { { .supply = "vdds", .init_load_uA = 98000 }, }; @@ -1269,6 +1293,52 @@ const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs = { .quirks = DSI_PHY_7NM_QUIRK_V4_3, }; +const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs = { + .has_phy_lane = true, + .regulator_data = dsi_phy_7nm_48000uA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_7nm_48000uA_regulators), + .ops = { + .enable = dsi_7nm_phy_enable, + .disable = dsi_7nm_phy_disable, + .pll_init = dsi_pll_7nm_init, + .save_pll_state = dsi_7nm_pll_save_state, + .restore_pll_state = dsi_7nm_pll_restore_state, + .set_continuous_clock = dsi_7nm_set_continuous_clock, + }, + .min_pll_rate = 600000000UL, +#ifdef CONFIG_64BIT + .max_pll_rate = 5000000000UL, +#else + .max_pll_rate = ULONG_MAX, +#endif + .io_start = { 0xae94400, 0xae96400 }, + .num_dsi_phy = 2, + .quirks = DSI_PHY_7NM_QUIRK_V4_2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs = { + .has_phy_lane = true, + .regulator_data = dsi_phy_7nm_97800uA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators), + .ops = { + .enable = dsi_7nm_phy_enable, + .disable = dsi_7nm_phy_disable, + .pll_init = dsi_pll_7nm_init, + .save_pll_state = dsi_7nm_pll_save_state, + .restore_pll_state = dsi_7nm_pll_restore_state, + .set_continuous_clock = dsi_7nm_set_continuous_clock, + }, + .min_pll_rate = 600000000UL, +#ifdef CONFIG_64BIT + .max_pll_rate = 5000000000UL, +#else + .max_pll_rate = ULONG_MAX, 
+#endif + .io_start = { 0xae95000, 0xae97000 }, + .num_dsi_phy = 2, + .quirks = DSI_PHY_7NM_QUIRK_V5_2, +}; + const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = { .has_phy_lane = true, .regulator_data = dsi_phy_7nm_98400uA_regulators, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 37b3809c6bdd..2fd388b892dc 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -8,12 +8,13 @@ #include <linux/gpio/consumer.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <drm/drm_bridge_connector.h> #include <drm/drm_of.h> +#include <drm/display/drm_hdmi_state_helper.h> -#include <sound/hdmi-codec.h> #include "hdmi.h" void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on) @@ -24,7 +25,7 @@ void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on) spin_lock_irqsave(&hdmi->reg_lock, flags); if (power_on) { ctrl |= HDMI_CTRL_ENABLE; - if (!hdmi->hdmi_mode) { + if (!hdmi->connector->display_info.is_hdmi) { ctrl |= HDMI_CTRL_HDMI; hdmi_write(hdmi, REG_HDMI_CTRL, ctrl); ctrl &= ~HDMI_CTRL_HDMI; @@ -165,8 +166,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, hdmi->dev = dev; hdmi->encoder = encoder; - hdmi_audio_infoframe_init(&hdmi->audio.infoframe); - ret = msm_hdmi_bridge_init(hdmi); if (ret) { DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret); @@ -201,12 +200,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - ret = msm_hdmi_hpd_enable(hdmi->bridge); - if (ret < 0) { - DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); - goto fail; - } - return 0; fail: @@ -222,135 +215,26 @@ fail: * The hdmi device: */ -#define HDMI_CFG(item, entry) \ - .item ## _names = item ##_names_ ## entry, \ - .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry) - -static const char *hpd_reg_names_8960[] = {"core-vdda"}; -static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"}; +static const char * const pwr_reg_names_8960[] = {"core-vdda"}; +static const char * const pwr_clk_names_8960[] = {"core", "master_iface", "slave_iface"}; static const struct hdmi_platform_config hdmi_tx_8960_config = { - HDMI_CFG(hpd_reg, 8960), - HDMI_CFG(hpd_clk, 8960), + .pwr_reg_names = pwr_reg_names_8960, + .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8960), + .pwr_clk_names = pwr_clk_names_8960, + .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8960), }; -static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"}; -static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"}; -static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"}; -static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0}; +static const char * const pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"}; +static const char * const pwr_clk_names_8x74[] = {"iface", "core", "mdp_core", "alt_iface"}; static const struct hdmi_platform_config hdmi_tx_8974_config = { - HDMI_CFG(pwr_reg, 8x74), - HDMI_CFG(pwr_clk, 8x74), - HDMI_CFG(hpd_clk, 8x74), - .hpd_freq = hpd_clk_freq_8x74, + .pwr_reg_names = pwr_reg_names_8x74, + .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8x74), + .pwr_clk_names = pwr_clk_names_8x74, + .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8x74), }; -/* - * HDMI audio codec callbacks - */ -static int msm_hdmi_audio_hw_params(struct device *dev, void *data, - struct hdmi_codec_daifmt *daifmt, - struct hdmi_codec_params *params) -{ - struct hdmi *hdmi = dev_get_drvdata(dev); - unsigned int chan; - unsigned int channel_allocation = 0; - unsigned 
int rate; - unsigned int level_shift = 0; /* 0dB */ - bool down_mix = false; - - DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate, - params->sample_width, params->cea.channels); - - switch (params->cea.channels) { - case 2: - /* FR and FL speakers */ - channel_allocation = 0; - chan = MSM_HDMI_AUDIO_CHANNEL_2; - break; - case 4: - /* FC, LFE, FR and FL speakers */ - channel_allocation = 0x3; - chan = MSM_HDMI_AUDIO_CHANNEL_4; - break; - case 6: - /* RR, RL, FC, LFE, FR and FL speakers */ - channel_allocation = 0x0B; - chan = MSM_HDMI_AUDIO_CHANNEL_6; - break; - case 8: - /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */ - channel_allocation = 0x1F; - chan = MSM_HDMI_AUDIO_CHANNEL_8; - break; - default: - return -EINVAL; - } - - switch (params->sample_rate) { - case 32000: - rate = HDMI_SAMPLE_RATE_32KHZ; - break; - case 44100: - rate = HDMI_SAMPLE_RATE_44_1KHZ; - break; - case 48000: - rate = HDMI_SAMPLE_RATE_48KHZ; - break; - case 88200: - rate = HDMI_SAMPLE_RATE_88_2KHZ; - break; - case 96000: - rate = HDMI_SAMPLE_RATE_96KHZ; - break; - case 176400: - rate = HDMI_SAMPLE_RATE_176_4KHZ; - break; - case 192000: - rate = HDMI_SAMPLE_RATE_192KHZ; - break; - default: - DRM_DEV_ERROR(dev, "rate[%d] not supported!\n", - params->sample_rate); - return -EINVAL; - } - - msm_hdmi_audio_set_sample_rate(hdmi, rate); - msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation, - level_shift, down_mix); - - return 0; -} - -static void msm_hdmi_audio_shutdown(struct device *dev, void *data) -{ - struct hdmi *hdmi = dev_get_drvdata(dev); - - msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0); -} - -static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = { - .hw_params = msm_hdmi_audio_hw_params, - .audio_shutdown = msm_hdmi_audio_shutdown, -}; - -static struct hdmi_codec_pdata codec_data = { - .ops = &msm_hdmi_audio_codec_ops, - .max_i2s_channels = 8, - .i2s = 1, -}; - -static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev) -{ - hdmi->audio_pdev = platform_device_register_data(dev, - HDMI_CODEC_DRV_NAME, - PLATFORM_DEVID_AUTO, - &codec_data, - sizeof(codec_data)); - return PTR_ERR_OR_ZERO(hdmi->audio_pdev); -} - static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) { struct msm_drm_private *priv = dev_get_drvdata(master); @@ -362,12 +246,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) return err; priv->hdmi = hdmi; - err = msm_hdmi_register_audio_driver(hdmi, dev); - if (err) { - DRM_ERROR("Failed to attach an audio codec %d\n", err); - hdmi->audio_pdev = NULL; - } - return 0; } @@ -377,12 +255,6 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master, struct msm_drm_private *priv = dev_get_drvdata(master); if (priv->hdmi) { - if (priv->hdmi->audio_pdev) - platform_device_unregister(priv->hdmi->audio_pdev); - - if (priv->hdmi->bridge) - msm_hdmi_hpd_disable(priv->hdmi); - msm_hdmi_destroy(priv->hdmi); priv->hdmi = NULL; } @@ -412,6 +284,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) hdmi->pdev = pdev; hdmi->config = config; spin_lock_init(&hdmi->reg_lock); + mutex_init(&hdmi->state_mutex); ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge); if (ret && ret != -ENODEV) @@ -438,20 +311,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) if (hdmi->irq < 0) return hdmi->irq; - hdmi->hpd_regs = devm_kcalloc(&pdev->dev, - config->hpd_reg_cnt, - sizeof(hdmi->hpd_regs[0]), - GFP_KERNEL); - if (!hdmi->hpd_regs) - return 
-ENOMEM; - - for (i = 0; i < config->hpd_reg_cnt; i++) - hdmi->hpd_regs[i].supply = config->hpd_reg_names[i]; - - ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs); - if (ret) - return dev_err_probe(dev, ret, "failed to get hpd regulators\n"); - hdmi->pwr_regs = devm_kcalloc(&pdev->dev, config->pwr_reg_cnt, sizeof(hdmi->pwr_regs[0]), @@ -466,25 +325,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "failed to get pwr regulators\n"); - hdmi->hpd_clks = devm_kcalloc(&pdev->dev, - config->hpd_clk_cnt, - sizeof(hdmi->hpd_clks[0]), - GFP_KERNEL); - if (!hdmi->hpd_clks) - return -ENOMEM; - - for (i = 0; i < config->hpd_clk_cnt; i++) { - struct clk *clk; - - clk = msm_clk_get(pdev, config->hpd_clk_names[i]); - if (IS_ERR(clk)) - return dev_err_probe(dev, PTR_ERR(clk), - "failed to get hpd clk: %s\n", - config->hpd_clk_names[i]); - - hdmi->hpd_clks[i] = clk; - } - hdmi->pwr_clks = devm_kcalloc(&pdev->dev, config->pwr_clk_cnt, sizeof(hdmi->pwr_clks[0]), @@ -492,17 +332,17 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev) if (!hdmi->pwr_clks) return -ENOMEM; - for (i = 0; i < config->pwr_clk_cnt; i++) { - struct clk *clk; + for (i = 0; i < config->pwr_clk_cnt; i++) + hdmi->pwr_clks[i].id = config->pwr_clk_names[i]; - clk = msm_clk_get(pdev, config->pwr_clk_names[i]); - if (IS_ERR(clk)) - return dev_err_probe(dev, PTR_ERR(clk), - "failed to get pwr clk: %s\n", - config->pwr_clk_names[i]); + ret = devm_clk_bulk_get(&pdev->dev, config->pwr_clk_cnt, hdmi->pwr_clks); + if (ret) + return ret; - hdmi->pwr_clks[i] = clk; - } + hdmi->extp_clk = devm_clk_get_optional(&pdev->dev, "extp"); + if (IS_ERR(hdmi->extp_clk)) + return dev_err_probe(dev, PTR_ERR(hdmi->extp_clk), + "failed to get extp clock\n"); hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); /* This will catch e.g. 
-EPROBE_DEFER */ @@ -548,6 +388,48 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev) msm_hdmi_put_phy(hdmi); } +static int msm_hdmi_runtime_suspend(struct device *dev) +{ + struct hdmi *hdmi = dev_get_drvdata(dev); + const struct hdmi_platform_config *config = hdmi->config; + + clk_bulk_disable_unprepare(config->pwr_clk_cnt, hdmi->pwr_clks); + + pinctrl_pm_select_sleep_state(dev); + + regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs); + + return 0; +} + +static int msm_hdmi_runtime_resume(struct device *dev) +{ + struct hdmi *hdmi = dev_get_drvdata(dev); + const struct hdmi_platform_config *config = hdmi->config; + int ret; + + ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs); + if (ret) + return ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + goto fail; + + ret = clk_bulk_prepare_enable(config->pwr_clk_cnt, hdmi->pwr_clks); + if (ret) + goto fail; + + return 0; + +fail: + pinctrl_pm_select_sleep_state(dev); + + return ret; +} + +DEFINE_RUNTIME_DEV_PM_OPS(msm_hdmi_pm_ops, msm_hdmi_runtime_suspend, msm_hdmi_runtime_resume, NULL); + static const struct of_device_id msm_hdmi_dt_match[] = { { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config }, { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config }, @@ -565,6 +447,7 @@ static struct platform_driver msm_hdmi_driver = { .driver = { .name = "hdmi_msm", .of_match_table = msm_hdmi_dt_match, + .pm = &msm_hdmi_pm_ops, }, }; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index a62d2aedfbb7..d5e572d10d6a 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -24,8 +24,8 @@ struct hdmi_platform_config; struct hdmi_audio { bool enabled; - struct hdmi_audio_infoframe infoframe; int rate; + int channels; }; struct hdmi_hdcp_ctrl; @@ -33,7 +33,6 @@ struct hdmi_hdcp_ctrl; struct hdmi { struct drm_device *dev; struct platform_device *pdev; - struct platform_device *audio_pdev; const struct hdmi_platform_config *config; @@ -42,16 +41,17 @@ struct hdmi { /* video state: */ bool power_on; + bool hpd_enabled; + struct mutex state_mutex; /* protects two booleans */ unsigned long int pixclock; void __iomem *mmio; void __iomem *qfprom_mmio; phys_addr_t mmio_phy_addr; - struct regulator_bulk_data *hpd_regs; struct regulator_bulk_data *pwr_regs; - struct clk **hpd_clks; - struct clk **pwr_clks; + struct clk_bulk_data *pwr_clks; + struct clk *extp_clk; struct gpio_desc *hpd_gpiod; @@ -67,8 +67,6 @@ struct hdmi { /* the encoder we are hooked to (outside of hdmi block) */ struct drm_encoder *encoder; - bool hdmi_mode; /* are we in hdmi mode? */ - int irq; struct workqueue_struct *workq; @@ -86,21 +84,12 @@ struct hdmi { /* platform config data (ie. 
from DT, or pdata) */ struct hdmi_platform_config { - /* regulators that need to be on for hpd: */ - const char **hpd_reg_names; - int hpd_reg_cnt; - /* regulators that need to be on for screen pwr: */ - const char **pwr_reg_names; + const char * const *pwr_reg_names; int pwr_reg_cnt; - /* clks that need to be on for hpd: */ - const char **hpd_clk_names; - const long unsigned *hpd_freq; - int hpd_clk_cnt; - - /* clks that need to be on for screen pwr (ie pixel clk): */ - const char **pwr_clk_names; + /* clks that need to be on: */ + const char * const *pwr_clk_names; int pwr_clk_cnt; }; @@ -207,26 +196,16 @@ static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev) /* * audio: */ -/* Supported HDMI Audio channels and rates */ -#define MSM_HDMI_AUDIO_CHANNEL_2 0 -#define MSM_HDMI_AUDIO_CHANNEL_4 1 -#define MSM_HDMI_AUDIO_CHANNEL_6 2 -#define MSM_HDMI_AUDIO_CHANNEL_8 3 - -#define HDMI_SAMPLE_RATE_32KHZ 0 -#define HDMI_SAMPLE_RATE_44_1KHZ 1 -#define HDMI_SAMPLE_RATE_48KHZ 2 -#define HDMI_SAMPLE_RATE_88_2KHZ 3 -#define HDMI_SAMPLE_RATE_96KHZ 4 -#define HDMI_SAMPLE_RATE_176_4KHZ 5 -#define HDMI_SAMPLE_RATE_192KHZ 6 +struct hdmi_codec_daifmt; +struct hdmi_codec_params; int msm_hdmi_audio_update(struct hdmi *hdmi); -int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, - uint32_t num_of_channels, uint32_t channel_allocation, - uint32_t level_shift, bool down_mix); -void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate); - +int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector, + struct drm_bridge *bridge, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params); +void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector, + struct drm_bridge *bridge); /* * hdmi bridge: @@ -237,8 +216,8 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi); void msm_hdmi_hpd_irq(struct drm_bridge *bridge); enum drm_connector_status msm_hdmi_bridge_detect( struct drm_bridge *bridge); -int msm_hdmi_hpd_enable(struct drm_bridge *bridge); -void msm_hdmi_hpd_disable(struct hdmi *hdmi); +void msm_hdmi_hpd_enable(struct drm_bridge *bridge); +void msm_hdmi_hpd_disable(struct drm_bridge *bridge); /* * i2c adapter for ddc: diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c index 4c2058c4adc1..b9ec14ef2c20 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c @@ -4,86 +4,28 @@ * Author: Rob Clark <robdclark@gmail.com> */ -#include <linux/hdmi.h> -#include "hdmi.h" +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> -/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */ -static int nchannels[] = { 2, 4, 6, 8 }; - -/* Supported HDMI Audio sample rates */ -#define MSM_HDMI_SAMPLE_RATE_32KHZ 0 -#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1 -#define MSM_HDMI_SAMPLE_RATE_48KHZ 2 -#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3 -#define MSM_HDMI_SAMPLE_RATE_96KHZ 4 -#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5 -#define MSM_HDMI_SAMPLE_RATE_192KHZ 6 -#define MSM_HDMI_SAMPLE_RATE_MAX 7 - - -struct hdmi_msm_audio_acr { - uint32_t n; /* N parameter for clock regeneration */ - uint32_t cts; /* CTS parameter for clock regeneration */ -}; - -struct hdmi_msm_audio_arcs { - unsigned long int pixclock; - struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX]; -}; - -#define HDMI_MSM_AUDIO_ARCS(pclk, ...) 
{ (1000 * (pclk)), __VA_ARGS__ } - -/* Audio constants lookup table for hdmi_msm_audio_acr_setup */ -/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */ -static const struct hdmi_msm_audio_arcs acr_lut[] = { - /* 25.200MHz */ - HDMI_MSM_AUDIO_ARCS(25200, { - {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000}, - {12288, 25200}, {25088, 28000}, {24576, 25200} }), - /* 27.000MHz */ - HDMI_MSM_AUDIO_ARCS(27000, { - {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000}, - {12288, 27000}, {25088, 30000}, {24576, 27000} }), - /* 27.027MHz */ - HDMI_MSM_AUDIO_ARCS(27030, { - {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030}, - {12288, 27027}, {25088, 30030}, {24576, 27027} }), - /* 74.250MHz */ - HDMI_MSM_AUDIO_ARCS(74250, { - {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500}, - {12288, 74250}, {25088, 82500}, {24576, 74250} }), - /* 148.500MHz */ - HDMI_MSM_AUDIO_ARCS(148500, { - {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000}, - {12288, 148500}, {25088, 165000}, {24576, 148500} }), -}; - -static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock) -{ - int i; +#include <linux/hdmi.h> - for (i = 0; i < ARRAY_SIZE(acr_lut); i++) { - const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i]; - if (arcs->pixclock == pixclock) - return arcs; - } +#include <sound/hdmi-codec.h> - return NULL; -} +#include "hdmi.h" int msm_hdmi_audio_update(struct hdmi *hdmi) { struct hdmi_audio *audio = &hdmi->audio; - struct hdmi_audio_infoframe *info = &audio->infoframe; - const struct hdmi_msm_audio_arcs *arcs = NULL; bool enabled = audio->enabled; uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl; - uint32_t infofrm_ctrl, audio_config; + uint32_t audio_config; + + if (!hdmi->connector->display_info.is_hdmi) + return -EINVAL; + + DBG("audio: enabled=%d, channels=%d, rate=%d", + audio->enabled, audio->channels, audio->rate); - DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, " - "level_shift_value=%d, downmix_inhibit=%d, rate=%d", - audio->enabled, info->channels, info->channel_allocation, - info->level_shift_value, info->downmix_inhibit, audio->rate); DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock); if (enabled && !(hdmi->power_on && hdmi->pixclock)) { @@ -91,20 +33,10 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) enabled = false; } - if (enabled) { - arcs = get_arcs(hdmi->pixclock); - if (!arcs) { - DBG("disabling audio: unsupported pixclock: %lu", - hdmi->pixclock); - enabled = false; - } - } - /* Read first before writing */ acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL); vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL); aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1); - infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0); audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG); /* Clear N/CTS selection bits */ @@ -113,17 +45,13 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) if (enabled) { uint32_t n, cts, multiplier; enum hdmi_acr_cts select; - uint8_t buf[14]; - n = arcs->lut[audio->rate].n; - cts = arcs->lut[audio->rate].cts; + drm_hdmi_acr_get_n_cts(hdmi->pixclock, audio->rate, &n, &cts); - if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) || - (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) { + if (audio->rate == 192000 || audio->rate == 176400) { multiplier = 4; n >>= 2; /* divide N by 4 and use multiplier */ - } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) || - (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) { + } else if (audio->rate == 96000 || 
audio->rate == 88200) { multiplier = 2; n >>= 1; /* divide N by 2 and use multiplier */ } else { @@ -136,13 +64,11 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY; acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier); - if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) || - (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) || - (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate)) + if (audio->rate == 48000 || audio->rate == 96000 || + audio->rate == 192000) select = ACR_48; - else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) || - (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) || - (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) + else if (audio->rate == 44100 || audio->rate == 88200 || + audio->rate == 176400) select = ACR_44; else /* default to 32k */ select = ACR_32; @@ -155,20 +81,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) HDMI_ACR_1_N(n)); hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2, - COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) | + COND(audio->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) | HDMI_AUDIO_PKT_CTRL2_OVERRIDE); acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT; acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND; - /* configure infoframe: */ - hdmi_audio_infoframe_pack(info, buf, sizeof(buf)); - hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0, - (buf[3] << 0) | (buf[4] << 8) | - (buf[5] << 16) | (buf[6] << 24)); - hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1, - (buf[7] << 0) | (buf[8] << 8)); - hdmi_write(hdmi, REG_HDMI_GC, 0); vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE; @@ -176,11 +94,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND; - infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND; - infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT; - infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE; - infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE; - audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK; audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4); audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE; @@ -190,17 +103,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE; vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME; aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND; - infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND; - infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT; - infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE; - infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE; audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE; } hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl); hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl); hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl); - hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl); hdmi_write(hdmi, REG_HDMI_AUD_INT, COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) | @@ -214,41 +122,58 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) return 0; } -int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, - uint32_t num_of_channels, uint32_t channel_allocation, - uint32_t level_shift, bool down_mix) +int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector, + struct drm_bridge *bridge, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) { - struct hdmi_audio *audio; - - if (!hdmi) - return -ENXIO; - - audio = &hdmi->audio; - - if (num_of_channels >= ARRAY_SIZE(nchannels)) + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + int ret; + + drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d 
channels\n", + params->sample_rate, + params->sample_width, + params->cea.channels); + + switch (params->sample_rate) { + case 32000: + case 44100: + case 48000: + case 88200: + case 96000: + case 176400: + case 192000: + break; + default: + drm_err(bridge->dev, "rate[%d] not supported!\n", + params->sample_rate); return -EINVAL; + } - audio->enabled = enabled; - audio->infoframe.channels = nchannels[num_of_channels]; - audio->infoframe.channel_allocation = channel_allocation; - audio->infoframe.level_shift_value = level_shift; - audio->infoframe.downmix_inhibit = down_mix; + ret = drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector, + ¶ms->cea); + if (ret) + return ret; + + hdmi->audio.rate = params->sample_rate; + hdmi->audio.channels = params->cea.channels; + hdmi->audio.enabled = true; return msm_hdmi_audio_update(hdmi); } -void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate) +void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector, + struct drm_bridge *bridge) { - struct hdmi_audio *audio; - - if (!hdmi) - return; + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; - audio = &hdmi->audio; + drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector); - if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX)) - return; + hdmi->audio.rate = 0; + hdmi->audio.channels = 2; + hdmi->audio.enabled = false; - audio->rate = rate; msm_hdmi_audio_update(hdmi); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 4a5b5112227f..53a7ce8cc7bc 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -7,6 +7,8 @@ #include <linux/delay.h> #include <drm/drm_bridge_connector.h> #include <drm/drm_edid.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> #include "msm_kms.h" #include "hdmi.h" @@ -16,75 +18,53 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge) struct drm_device *dev = bridge->dev; struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; - const struct hdmi_platform_config *config = hdmi->config; - int i, ret; - - pm_runtime_get_sync(&hdmi->pdev->dev); + int ret; - ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs); - if (ret) - DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret); + pm_runtime_resume_and_get(&hdmi->pdev->dev); - if (config->pwr_clk_cnt > 0) { + if (hdmi->extp_clk) { DBG("pixclock: %lu", hdmi->pixclock); - ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock); - if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n", - config->pwr_clk_names[0], ret); - } - } + ret = clk_set_rate(hdmi->extp_clk, hdmi->pixclock); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to set extp clk rate: %d\n", ret); - for (i = 0; i < config->pwr_clk_cnt; i++) { - ret = clk_prepare_enable(hdmi->pwr_clks[i]); - if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n", - config->pwr_clk_names[i], ret); - } + ret = clk_prepare_enable(hdmi->extp_clk); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to enable extp clk: %d\n", ret); } } static void power_off(struct drm_bridge *bridge) { - struct drm_device *dev = bridge->dev; struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; - const struct hdmi_platform_config *config = hdmi->config; - int i, ret; /* TODO do we need to wait for final vblank somewhere before * cutting the clocks? 
*/ mdelay(16 + 4); - for (i = 0; i < config->pwr_clk_cnt; i++) - clk_disable_unprepare(hdmi->pwr_clks[i]); - - ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs); - if (ret) - DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret); + if (hdmi->extp_clk) + clk_disable_unprepare(hdmi->extp_clk); pm_runtime_put(&hdmi->pdev->dev); } #define AVI_IFRAME_LINE_NUMBER 1 +#define SPD_IFRAME_LINE_NUMBER 1 +#define VENSPEC_IFRAME_LINE_NUMBER 3 -static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi) +static int msm_hdmi_config_avi_infoframe(struct hdmi *hdmi, + const u8 *buffer, size_t len) { - struct drm_crtc *crtc = hdmi->encoder->crtc; - const struct drm_display_mode *mode = &crtc->state->adjusted_mode; - union hdmi_infoframe frame; - u8 buffer[HDMI_INFOFRAME_SIZE(AVI)]; + u32 buf[4] = {}; u32 val; - int len; - - drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - hdmi->connector, mode); + int i; - len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer)); - if (len < 0) { + if (len != HDMI_INFOFRAME_SIZE(AVI) || len - 3 > sizeof(buf)) { DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to configure avi infoframe\n"); - return; + return -EINVAL; } /* @@ -93,56 +73,247 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi) * written to the LSB byte of AVI_INFO0 and the version is written to * the third byte from the LSB of AVI_INFO3 */ - hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), + memcpy(buf, &buffer[3], len - 3); + + buf[3] |= buffer[1] << 24; + + for (i = 0; i < ARRAY_SIZE(buf); i++) + hdmi_write(hdmi, REG_HDMI_AVI_INFO(i), buf[i]); + + val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1); + val |= HDMI_INFOFRAME_CTRL0_AVI_SEND | + HDMI_INFOFRAME_CTRL0_AVI_CONT; + hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val); + + val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1); + val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK; + val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER); + hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val); + + return 0; +} + +static int msm_hdmi_config_audio_infoframe(struct hdmi *hdmi, + const u8 *buffer, size_t len) +{ + u32 val; + + if (len != HDMI_INFOFRAME_SIZE(AUDIO)) { + DRM_DEV_ERROR(&hdmi->pdev->dev, + "failed to configure audio infoframe\n"); + return -EINVAL; + } + + hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0, buffer[3] | buffer[4] << 8 | buffer[5] << 16 | buffer[6] << 24); - hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), + hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1, buffer[7] | buffer[8] << 8 | buffer[9] << 16 | buffer[10] << 24); - hdmi_write(hdmi, REG_HDMI_AVI_INFO(2), - buffer[11] | - buffer[12] << 8 | - buffer[13] << 16 | - buffer[14] << 24); + val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1); + val |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND | + HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT | + HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE | + HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE; + hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val); - hdmi_write(hdmi, REG_HDMI_AVI_INFO(3), - buffer[15] | - buffer[16] << 8 | - buffer[1] << 24); + return 0; +} - hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, - HDMI_INFOFRAME_CTRL0_AVI_SEND | - HDMI_INFOFRAME_CTRL0_AVI_CONT); +static int msm_hdmi_config_spd_infoframe(struct hdmi *hdmi, + const u8 *buffer, size_t len) +{ + u32 buf[7] = {}; + u32 val; + int i; - val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1); - val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK; - val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER); - hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val); + if (len != HDMI_INFOFRAME_SIZE(SPD) || len - 3 > 
sizeof(buf)) { + DRM_DEV_ERROR(&hdmi->pdev->dev, + "failed to configure SPD infoframe\n"); + return -EINVAL; + } + + /* checksum gets written together with the body of the frame */ + hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR, + buffer[0] | + buffer[1] << 8 | + buffer[2] << 16); + + memcpy(buf, &buffer[3], len - 3); + + for (i = 0; i < ARRAY_SIZE(buf); i++) + hdmi_write(hdmi, REG_HDMI_GENERIC1(i), buf[i]); + + val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL); + val |= HDMI_GEN_PKT_CTRL_GENERIC1_SEND | + HDMI_GEN_PKT_CTRL_GENERIC1_CONT | + HDMI_GEN_PKT_CTRL_GENERIC1_LINE(SPD_IFRAME_LINE_NUMBER); + hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val); + + return 0; } -static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge) +static int msm_hdmi_config_hdmi_infoframe(struct hdmi *hdmi, + const u8 *buffer, size_t len) +{ + u32 buf[7] = {}; + u32 val; + int i; + + if (len < HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_SIZE || + len - 3 > sizeof(buf)) { + DRM_DEV_ERROR(&hdmi->pdev->dev, + "failed to configure HDMI infoframe\n"); + return -EINVAL; + } + + /* checksum gets written together with the body of the frame */ + hdmi_write(hdmi, REG_HDMI_GENERIC0_HDR, + buffer[0] | + buffer[1] << 8 | + buffer[2] << 16); + + memcpy(buf, &buffer[3], len - 3); + + for (i = 0; i < ARRAY_SIZE(buf); i++) + hdmi_write(hdmi, REG_HDMI_GENERIC0(i), buf[i]); + + val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL); + val |= HDMI_GEN_PKT_CTRL_GENERIC0_SEND | + HDMI_GEN_PKT_CTRL_GENERIC0_CONT | + HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE | + HDMI_GEN_PKT_CTRL_GENERIC0_LINE(VENSPEC_IFRAME_LINE_NUMBER); + hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val); + + return 0; +} + +static int msm_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + u32 val; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0); + val &= ~(HDMI_INFOFRAME_CTRL0_AVI_SEND | + HDMI_INFOFRAME_CTRL0_AVI_CONT); + hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val); + + val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1); + val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK; + hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val); + + break; + + case HDMI_INFOFRAME_TYPE_AUDIO: + val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0); + val &= ~(HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND | + HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT | + HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE | + HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE); + hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val); + + val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1); + val &= ~HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK; + hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val); + + break; + + case HDMI_INFOFRAME_TYPE_SPD: + val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL); + val &= ~(HDMI_GEN_PKT_CTRL_GENERIC1_SEND | + HDMI_GEN_PKT_CTRL_GENERIC1_CONT | + HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK); + hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val); + + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL); + val &= ~(HDMI_GEN_PKT_CTRL_GENERIC0_SEND | + HDMI_GEN_PKT_CTRL_GENERIC0_CONT | + HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE | + HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK); + hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val); + + break; + + default: + drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type); + } + + return 0; +} + +static int msm_hdmi_bridge_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 
*buffer, size_t len) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + + msm_hdmi_bridge_clear_infoframe(bridge, type); + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + return msm_hdmi_config_avi_infoframe(hdmi, buffer, len); + case HDMI_INFOFRAME_TYPE_AUDIO: + return msm_hdmi_config_audio_infoframe(hdmi, buffer, len); + case HDMI_INFOFRAME_TYPE_SPD: + return msm_hdmi_config_spd_infoframe(hdmi, buffer, len); + case HDMI_INFOFRAME_TYPE_VENDOR: + return msm_hdmi_config_hdmi_infoframe(hdmi, buffer, len); + default: + drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type); + return 0; + } +} + +static void msm_hdmi_set_timings(struct hdmi *hdmi, + const struct drm_display_mode *mode); + +static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; struct hdmi_phy *phy = hdmi->phy; + struct drm_encoder *encoder = bridge->encoder; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; DBG("power up"); + connector = drm_atomic_get_new_connector_for_encoder(state, encoder); + conn_state = drm_atomic_get_new_connector_state(state, connector); + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + + hdmi->pixclock = conn_state->hdmi.tmds_char_rate; + + msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode); + + mutex_lock(&hdmi->state_mutex); if (!hdmi->power_on) { msm_hdmi_phy_resource_enable(phy); msm_hdmi_power_on(bridge); hdmi->power_on = true; - if (hdmi->hdmi_mode) { - msm_hdmi_config_avi_infoframe(hdmi); - msm_hdmi_audio_update(hdmi); - } } + mutex_unlock(&hdmi->state_mutex); + + if (connector->display_info.is_hdmi) + msm_hdmi_audio_update(hdmi); + + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); msm_hdmi_phy_powerup(phy, hdmi->pixclock); @@ -152,7 +323,8 @@ static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge) msm_hdmi_hdcp_on(hdmi->hdcp_ctrl); } -static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge) +static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; @@ -162,32 +334,29 @@ static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge) msm_hdmi_hdcp_off(hdmi->hdcp_ctrl); DBG("power down"); - msm_hdmi_set_mode(hdmi, false); + + /* Keep the HDMI enabled if the HPD is enabled */ + mutex_lock(&hdmi->state_mutex); + msm_hdmi_set_mode(hdmi, hdmi->hpd_enabled); msm_hdmi_phy_powerdown(phy); if (hdmi->power_on) { power_off(bridge); hdmi->power_on = false; - if (hdmi->hdmi_mode) + if (hdmi->connector->display_info.is_hdmi) msm_hdmi_audio_update(hdmi); msm_hdmi_phy_resource_disable(phy); } + mutex_unlock(&hdmi->state_mutex); } -static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) +static void msm_hdmi_set_timings(struct hdmi *hdmi, + const struct drm_display_mode *mode) { - struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); - struct hdmi *hdmi = hdmi_bridge->hdmi; int hstart, hend, vstart, vend; uint32_t frame_ctrl; - mode = adjusted_mode; - - hdmi->pixclock = mode->clock * 1000; - hstart = mode->htotal - mode->hsync_start; hend = mode->htotal - mode->hsync_start + mode->hdisplay; @@ -231,9 
+400,6 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge, frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN; DBG("frame_ctrl=%08x", frame_ctrl); hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl); - - if (hdmi->hdmi_mode) - msm_hdmi_audio_update(hdmi); } static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridge, @@ -251,32 +417,18 @@ static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridg hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); - if (drm_edid) { - /* - * FIXME: This should use connector->display_info.is_hdmi from a - * path that has read the EDID and called - * drm_edid_connector_update(). - */ - const struct edid *edid = drm_edid_raw(drm_edid); - - hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); - } - return drm_edid; } -static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge, - const struct drm_display_info *info, - const struct drm_display_mode *mode) +static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; - const struct hdmi_platform_config *config = hdmi->config; struct msm_drm_private *priv = bridge->dev->dev_private; struct msm_kms *kms = priv->kms; - long actual, requested; - - requested = 1000 * mode->clock; + long actual; /* for mdp5/apq8074, we manage our own pixel clk (as opposed to * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder @@ -284,27 +436,36 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge */ if (kms->funcs->round_pixclk) actual = kms->funcs->round_pixclk(kms, - requested, hdmi_bridge->hdmi->encoder); - else if (config->pwr_clk_cnt > 0) - actual = clk_round_rate(hdmi->pwr_clks[0], requested); + tmds_rate, + hdmi_bridge->hdmi->encoder); + else if (hdmi->extp_clk) + actual = clk_round_rate(hdmi->extp_clk, tmds_rate); else - actual = requested; + actual = tmds_rate; - DBG("requested=%ld, actual=%ld", requested, actual); + DBG("requested=%lld, actual=%ld", tmds_rate, actual); - if (actual != requested) + if (actual != tmds_rate) return MODE_CLOCK_RANGE; return 0; } static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = { - .pre_enable = msm_hdmi_bridge_pre_enable, - .post_disable = msm_hdmi_bridge_post_disable, - .mode_set = msm_hdmi_bridge_mode_set, - .mode_valid = msm_hdmi_bridge_mode_valid, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_pre_enable = msm_hdmi_bridge_atomic_pre_enable, + .atomic_post_disable = msm_hdmi_bridge_atomic_post_disable, .edid_read = msm_hdmi_bridge_edid_read, .detect = msm_hdmi_bridge_detect, + .hpd_enable = msm_hdmi_hpd_enable, + .hpd_disable = msm_hdmi_hpd_disable, + .hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid, + .hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe, + .hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe, + .hdmi_audio_prepare = msm_hdmi_bridge_audio_prepare, + .hdmi_audio_shutdown = msm_hdmi_bridge_audio_shutdown, }; static void @@ -324,21 +485,27 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi) struct hdmi_bridge *hdmi_bridge; int ret; - hdmi_bridge = devm_kzalloc(hdmi->dev->dev, - sizeof(*hdmi_bridge), GFP_KERNEL); - if (!hdmi_bridge) - return -ENOMEM; + hdmi_bridge = devm_drm_bridge_alloc(hdmi->dev->dev, 
struct hdmi_bridge, base, + &msm_hdmi_bridge_funcs); + if (IS_ERR(hdmi_bridge)) + return PTR_ERR(hdmi_bridge); hdmi_bridge->hdmi = hdmi; INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work); bridge = &hdmi_bridge->base; - bridge->funcs = &msm_hdmi_bridge_funcs; bridge->ddc = hdmi->i2c; bridge->type = DRM_MODE_CONNECTOR_HDMIA; + bridge->vendor = "Qualcomm"; + bridge->product = "Snapdragon"; bridge->ops = DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_HDMI | + DRM_BRIDGE_OP_HDMI_AUDIO | DRM_BRIDGE_OP_EDID; + bridge->hdmi_audio_max_i2s_playback_channels = 8; + bridge->hdmi_audio_dev = &hdmi->pdev->dev; + bridge->hdmi_audio_dai_port = -1; ret = devm_drm_bridge_add(hdmi->dev->dev, bridge); if (ret) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c index 9ce0ffa35417..407e6c449ee0 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c @@ -60,68 +60,30 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi) } } -static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) -{ - const struct hdmi_platform_config *config = hdmi->config; - struct device *dev = &hdmi->pdev->dev; - int i, ret; - - if (enable) { - for (i = 0; i < config->hpd_clk_cnt; i++) { - if (config->hpd_freq && config->hpd_freq[i]) { - ret = clk_set_rate(hdmi->hpd_clks[i], - config->hpd_freq[i]); - if (ret) - dev_warn(dev, - "failed to set clk %s (%d)\n", - config->hpd_clk_names[i], ret); - } - - ret = clk_prepare_enable(hdmi->hpd_clks[i]); - if (ret) { - DRM_DEV_ERROR(dev, - "failed to enable hpd clk: %s (%d)\n", - config->hpd_clk_names[i], ret); - } - } - } else { - for (i = config->hpd_clk_cnt - 1; i >= 0; i--) - clk_disable_unprepare(hdmi->hpd_clks[i]); - } -} - -int msm_hdmi_hpd_enable(struct drm_bridge *bridge) +void msm_hdmi_hpd_enable(struct drm_bridge *bridge) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; - const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; uint32_t hpd_ctrl; int ret; unsigned long flags; - ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs); - if (ret) { - DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret); - goto fail; - } - - ret = pinctrl_pm_select_default_state(dev); - if (ret) { - DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret); - goto fail; - } - if (hdmi->hpd_gpiod) gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1); - pm_runtime_get_sync(dev); - enable_hpd_clocks(hdmi, true); + ret = pm_runtime_resume_and_get(dev); + if (WARN_ON(ret)) + return; + mutex_lock(&hdmi->state_mutex); msm_hdmi_set_mode(hdmi, false); msm_hdmi_phy_reset(hdmi); msm_hdmi_set_mode(hdmi, true); + hdmi->hpd_enabled = true; + mutex_unlock(&hdmi->state_mutex); + hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b); /* enable HPD events: */ @@ -140,34 +102,23 @@ int msm_hdmi_hpd_enable(struct drm_bridge *bridge) hdmi_write(hdmi, REG_HDMI_HPD_CTRL, HDMI_HPD_CTRL_ENABLE | hpd_ctrl); spin_unlock_irqrestore(&hdmi->reg_lock, flags); - - return 0; - -fail: - return ret; } -void msm_hdmi_hpd_disable(struct hdmi *hdmi) +void msm_hdmi_hpd_disable(struct drm_bridge *bridge) { - const struct hdmi_platform_config *config = hdmi->config; + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; struct device *dev = &hdmi->pdev->dev; - int ret; /* Disable HPD interrupt */ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); - msm_hdmi_set_mode(hdmi, false); + mutex_lock(&hdmi->state_mutex); + 
hdmi->hpd_enabled = false; + msm_hdmi_set_mode(hdmi, hdmi->power_on); + mutex_unlock(&hdmi->state_mutex); - enable_hpd_clocks(hdmi, false); pm_runtime_put(dev); - - ret = pinctrl_pm_select_sleep_state(dev); - if (ret) - dev_warn(dev, "pinctrl state chg failed: %d\n", ret); - - ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs); - if (ret) - dev_warn(dev, "failed to disable hpd regulator: %d\n", ret); } void msm_hdmi_hpd_irq(struct drm_bridge *bridge) @@ -202,14 +153,16 @@ void msm_hdmi_hpd_irq(struct drm_bridge *bridge) static enum drm_connector_status detect_reg(struct hdmi *hdmi) { - uint32_t hpd_int_status; + u32 hpd_int_status = 0; + int ret; - pm_runtime_get_sync(&hdmi->pdev->dev); - enable_hpd_clocks(hdmi, true); + ret = pm_runtime_resume_and_get(&hdmi->pdev->dev); + if (ret) + goto out; hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); - enable_hpd_clocks(hdmi, false); +out: pm_runtime_put(&hdmi->pdev->dev); return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ? diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c index 7aa500d24240..ebefea4fb408 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c @@ -107,11 +107,15 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c, if (num == 0) return num; + ret = pm_runtime_resume_and_get(&hdmi->pdev->dev); + if (ret) + return ret; + init_ddc(hdmi_i2c); ret = ddc_clear_irq(hdmi_i2c); if (ret) - return ret; + goto fail; for (i = 0; i < num; i++) { struct i2c_msg *p = &msgs[i]; @@ -169,7 +173,7 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c, hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS), hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS), hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL)); - return ret; + goto fail; } ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS); @@ -202,7 +206,13 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c, } } + pm_runtime_put(&hdmi->pdev->dev); + return i; + +fail: + pm_runtime_put(&hdmi->pdev->dev); + return ret; } static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c index 03120c54ced6..667573f1db7c 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -58,7 +58,11 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy) struct device *dev = &phy->pdev->dev; int i, ret = 0; - pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) { + DRM_DEV_ERROR(dev, "runtime resume failed: %d\n", ret); + return ret; + } ret = regulator_bulk_enable(cfg->num_regs, phy->regs); if (ret) { diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index a7a2384044ff..87a91148a731 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -183,10 +183,16 @@ static unsigned get_crtc_mask(struct drm_atomic_state *state) int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_crtc *crtc; - int i; + int i, ret = 0; + /* + * FIXME: stop setting allow_modeset and move this check to the DPU + * driver. 
+ */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if ((old_crtc_state->ctm && !new_crtc_state->ctm) || @@ -196,6 +202,11 @@ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) } } + if (kms && kms->funcs && kms->funcs->check_mode_changed) + ret = kms->funcs->check_mode_changed(kms, state); + if (ret) + return ret; + return drm_atomic_helper_check(dev, state); } @@ -221,6 +232,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + atomic_set(&kms->fault_snapshot_capture, 0); + /* * Now that there is no in-progress flush, prepare the * current update: diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 7ab607252d18..6af72162cda4 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -208,6 +208,35 @@ DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops, shrink_get, shrink_set, "0x%08llx\n"); +/* + * Return the number of microseconds to wait until stall-on-fault is + * re-enabled. If 0 then it is already enabled or will be re-enabled on the + * next submit (unless there's a leftover devcoredump). This is useful for + * kernel tests that intentionally produce a fault and check the devcoredump to + * wait until the cooldown period is over. + */ + +static int +stall_reenable_time_get(void *data, u64 *val) +{ + struct msm_drm_private *priv = data; + unsigned long irq_flags; + + spin_lock_irqsave(&priv->fault_stall_lock, irq_flags); + + if (priv->stall_enabled) + *val = 0; + else + *val = max(ktime_us_delta(priv->stall_reenable_time, ktime_get()), 0); + + spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(stall_reenable_time_fops, + stall_reenable_time_get, NULL, + "%lld\n"); static int msm_gem_show(struct seq_file *m, void *arg) { @@ -319,6 +348,9 @@ static void msm_debugfs_gpu_init(struct drm_minor *minor) debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root, &priv->disable_err_irq); + debugfs_create_file("stall_reenable_time_us", 0400, minor->debugfs_root, + priv, &stall_reenable_time_fops); + gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root); debugfs_create_bool("idle_clamp",0600, gpu_devfreq, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index ff7a7a9f7b0d..d007687c2446 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -245,6 +245,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock); drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock); + /* Initialize stall-on-fault */ + spin_lock_init(&priv->fault_stall_lock); + priv->stall_enabled = true; + /* Teach lockdep about lock ordering wrt. 
shrinker: */ fs_reclaim_acquire(GFP_KERNEL); might_lock(&priv->lru.lock); @@ -671,7 +675,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value); break; case MSM_INFO_GET_FLAGS: - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { ret = -EINVAL; break; } @@ -894,6 +898,7 @@ static const struct drm_driver msm_driver = { DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_MODESET | + DRIVER_SYNCOBJ_TIMELINE | DRIVER_SYNCOBJ, .open = msm_open, .postclose = msm_postclose, @@ -925,7 +930,7 @@ static const struct drm_driver msm_driver = { * is no external component that we need to add since LVDS is within MDP4 * itself. */ -static int add_components_mdp(struct device *master_dev, +static int add_mdp_components(struct device *master_dev, struct component_match **matchptr) { struct device_node *np = master_dev->of_node; @@ -1029,7 +1034,7 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - if (of_device_is_available(np)) + if (of_device_is_available(np) && adreno_has_gpu(np)) drm_of_component_match_add(dev, matchptr, component_compare_of, np); of_node_put(np); @@ -1070,7 +1075,7 @@ int msm_drv_probe(struct device *master_dev, /* Add mdp components if we have KMS. */ if (kms_init) { - ret = add_components_mdp(master_dev, &match); + ret = add_mdp_components(master_dev, &match); if (ret) return ret; } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index fee31680a6d5..c8afb1ea6040 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -222,6 +222,29 @@ struct msm_drm_private { * the sw hangcheck mechanism. */ bool disable_err_irq; + + /** + * @fault_stall_lock: + * + * Serialize changes to stall-on-fault state. + */ + spinlock_t fault_stall_lock; + + /** + * @fault_stall_reenable_time: + * + * If stall_enabled is false, when to reenable stall-on-fault. + * Protected by @fault_stall_lock. + */ + ktime_t stall_reenable_time; + + /** + * @stall_enabled: + * + * Whether stall-on-fault is currently enabled. Protected by + * @fault_stall_lock. 
+ */ + bool stall_enabled; }; const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier); @@ -537,15 +560,12 @@ static inline int align_pitch(int width, int bpp) static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) { ktime_t now = ktime_get(); - s64 remaining_jiffies; - if (ktime_compare(*timeout, now) < 0) { - remaining_jiffies = 0; - } else { - ktime_t rem = ktime_sub(*timeout, now); - remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ); - } + if (ktime_compare(*timeout, now) <= 0) + return 0; + ktime_t rem = ktime_sub(*timeout, now); + s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ); return clamp(remaining_jiffies, 1LL, (s64)INT_MAX); } diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h index b9049fe1e279..63f95523b2cb 100644 --- a/drivers/gpu/drm/msm/msm_dsc_helper.h +++ b/drivers/gpu/drm/msm/msm_dsc_helper.h @@ -13,17 +13,6 @@ #include <drm/display/drm_dsc_helper.h> /** - * msm_dsc_get_slices_per_intf() - calculate number of slices per interface - * @dsc: Pointer to drm dsc config struct - * @intf_width: interface width in pixels - * Returns: Integer representing the number of slices for the given interface - */ -static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width) -{ - return DIV_ROUND_UP(intf_width, dsc->slice_width); -} - -/** * msm_dsc_get_bytes_per_line() - calculate bytes per line * @dsc: Pointer to drm dsc config struct * Returns: Integer value representing bytes per line. DSI and DP need diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index 1a5d4f1c8b42..d41e5a6bbee0 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -65,8 +65,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr, fctx->completed_fence = fctx->last_fence; *fctx->fenceptr = fctx->last_fence; - hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - fctx->deadline_timer.function = deadline_timer; + hrtimer_setup(&fctx->deadline_timer, deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); kthread_init_work(&fctx->deadline_work, deadline_work); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index ebc9ba66efb8..2995e80fec3b 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -735,7 +735,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) msm_gem_assert_locked(obj); - if (obj->import_attach) + if (drm_gem_is_imported(obj)) return ERR_PTR(-ENODEV); pages = msm_gem_get_pages_locked(obj, madv); @@ -1074,7 +1074,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj) put_iova_spaces(obj, true); - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { GEM_WARN_ON(msm_obj->vaddr); /* Don't drop the pages for imported dmabuf, as they are not diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 85f0257e83da..ba5c4ff76292 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -224,7 +224,7 @@ msm_gem_assert_locked(struct drm_gem_object *obj) /* imported/exported objects are not purgeable: */ static inline bool is_unpurgeable(struct msm_gem_object *msm_obj) { - return msm_obj->base.import_attach || msm_obj->pin_count; + return drm_gem_is_imported(&msm_obj->base) || msm_obj->pin_count; } static inline bool is_purgeable(struct msm_gem_object *msm_obj) diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c 
b/drivers/gpu/drm/msm/msm_gem_prime.c index ee267490c935..2e37913d5a6a 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -50,7 +50,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj) struct page **pages; int ret = 0; - if (obj->import_attach) + if (drm_gem_is_imported(obj)) return 0; pages = msm_gem_pin_pages_locked(obj); @@ -62,7 +62,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj) void msm_gem_prime_unpin(struct drm_gem_object *obj) { - if (obj->import_attach) + if (drm_gem_is_imported(obj)) return; msm_gem_unpin_pages_locked(obj); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index dee470403036..d4f71bb54e84 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref) container_of(kref, struct msm_gem_submit, ref); unsigned i; + /* + * In error paths, we could unref the submit without calling + * drm_sched_entity_push_job(), so msm_job_free() will never + * get called. Since drm_sched_job_cleanup() will NULL out + * s_fence, we can use that to detect this case. + */ + if (submit->base.s_fence) + drm_sched_job_cleanup(&submit->base); + if (submit->fence_id) { spin_lock(&submit->queue->idr_lock); idr_remove(&submit->queue->fence_idr, submit->fence_id); @@ -509,7 +518,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit, } if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) { - ret = -SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags); + ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags); break; } @@ -649,6 +658,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_ringbuffer *ring; struct msm_submit_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; + struct sync_file *sync_file = NULL; int out_fence_fd = -1; unsigned i; int ret; @@ -858,7 +868,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { - struct sync_file *sync_file = sync_file_create(submit->user_fence); + sync_file = sync_file_create(submit->user_fence); if (!sync_file) { ret = -ENOMEM; } else { @@ -892,8 +902,11 @@ out: out_unlock: mutex_unlock(&queue->lock); out_post_unlock: - if (ret && (out_fence_fd >= 0)) + if (ret && (out_fence_fd >= 0)) { put_unused_fd(out_fence_fd); + if (sync_file) + fput(sync_file->file); + } if (!IS_ERR_OR_NULL(submit)) { msm_gem_submit_put(submit); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 8557998e0c92..3947f7ba1421 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -257,7 +257,8 @@ out: } static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, - struct msm_gem_submit *submit, char *comm, char *cmd) + struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info, + char *comm, char *cmd) { struct msm_gpu_state *state; @@ -276,11 +277,21 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, /* Fill in the additional crash state information */ state->comm = kstrdup(comm, GFP_KERNEL); state->cmd = kstrdup(cmd, GFP_KERNEL); - state->fault_info = gpu->fault_info; + if (fault_info) + state->fault_info = *fault_info; if (submit) { int i; + if (state->fault_info.ttbr0) { + struct msm_gpu_fault_info *info = &state->fault_info; + struct msm_mmu *mmu = submit->aspace->mmu; + + msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0, 
+ &info->asid); + msm_iommu_pagetable_walk(mmu, info->iova, info->ptes); + } + state->bos = kcalloc(submit->nr_bos, sizeof(struct msm_gpu_state_bo), GFP_KERNEL); @@ -299,7 +310,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, } #else static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, - struct msm_gem_submit *submit, char *comm, char *cmd) + struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info, + char *comm, char *cmd) { } #endif @@ -396,7 +408,7 @@ static void recover_worker(struct kthread_work *work) /* Record the crash state */ pm_runtime_get_sync(&gpu->pdev->dev); - msm_gpu_crashstate_capture(gpu, submit, comm, cmd); + msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd); kfree(cmd); kfree(comm); @@ -450,9 +462,8 @@ out_unlock: msm_gpu_retire(gpu); } -static void fault_worker(struct kthread_work *work) +void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info) { - struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work); struct msm_gem_submit *submit; struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); char *comm = NULL, *cmd = NULL; @@ -475,16 +486,13 @@ static void fault_worker(struct kthread_work *work) /* Record the crash state */ pm_runtime_get_sync(&gpu->pdev->dev); - msm_gpu_crashstate_capture(gpu, submit, comm, cmd); + msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd); pm_runtime_put_sync(&gpu->pdev->dev); kfree(cmd); kfree(comm); resume_smmu: - memset(&gpu->fault_info, 0, sizeof(gpu->fault_info)); - gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); - mutex_unlock(&gpu->lock); } @@ -512,7 +520,7 @@ static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) static void hangcheck_handler(struct timer_list *t) { - struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer); + struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer); struct drm_device *dev = gpu->dev; struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); uint32_t fence = ring->memptrs->fence; @@ -873,7 +881,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, init_waitqueue_head(&gpu->retire_event); kthread_init_work(&gpu->retire_work, retire_worker); kthread_init_work(&gpu->recover_work, recover_worker); - kthread_init_work(&gpu->fault_work, fault_worker); priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD; diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 7cabc8480d7c..5bf7cd985b9c 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -101,6 +101,14 @@ struct msm_gpu_fault_info { int flags; const char *type; const char *block; + + /* Information about what we expect the current SMMU state to be, + * for example pgtbl_ttbr0 should match the ttbr0 above, which + * was read back from SMMU registers at fault time.
+ */ + phys_addr_t pgtbl_ttbr0; + u64 ptes[4]; + int asid; }; /** @@ -245,12 +253,6 @@ struct msm_gpu { #define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3 struct timer_list hangcheck_timer; - /* Fault info for most recent iova fault: */ - struct msm_gpu_fault_info fault_info; - - /* work for handling GPU ioval faults: */ - struct kthread_work fault_work; - /* work for handling GPU recovery: */ struct kthread_work recover_work; @@ -660,6 +662,7 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta void msm_gpu_cleanup(struct msm_gpu *gpu); struct msm_gpu *adreno_load_gpu(struct drm_device *dev); +bool adreno_has_gpu(struct device_node *node); void __init adreno_register(void); void __exit adreno_unregister(void); @@ -697,6 +700,8 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu) mutex_unlock(&gpu->lock); } +void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info); + /* * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can * support expanded privileges diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 6970b0f7f457..2e1d5c343272 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu) priv->gpu_devfreq_config.downdifferential = 10; mutex_init(&df->lock); + df->suspended = true; ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, DEV_PM_QOS_MIN_FREQUENCY, 0); diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c index afedd61c3e28..a6efe1eac271 100644 --- a/drivers/gpu/drm/msm/msm_io_utils.c +++ b/drivers/gpu/drm/msm/msm_io_utils.c @@ -135,8 +135,7 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work, clockid_t clock_id, enum hrtimer_mode mode) { - hrtimer_init(&work->timer, clock_id, mode); - work->timer.function = msm_hrtimer_worktimer; + hrtimer_setup(&work->timer, msm_hrtimer_worktimer, clock_id, mode); work->worker = worker; kthread_init_work(&work->work, fn); } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 2a94e82316f9..739ce2c283a4 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -195,6 +195,28 @@ struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu) return &iommu->domain->geometry; } +int +msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]) +{ + struct msm_iommu_pagetable *pagetable; + struct arm_lpae_io_pgtable_walk_data wd = {}; + + if (mmu->type != MSM_MMU_IOMMU_PAGETABLE) + return -EINVAL; + + pagetable = to_pagetable(mmu); + + if (!pagetable->pgtbl_ops->pgtable_walk) + return -EINVAL; + + pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd); + + for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++) + ptes[i] = wd.ptes[i]; + + return 0; +} + static const struct msm_mmu_funcs pagetable_funcs = { .map = msm_iommu_pagetable_map, .unmap = msm_iommu_pagetable_unmap, @@ -243,7 +265,7 @@ static const struct iommu_flush_ops tlb_ops = { .tlb_add_page = msm_iommu_tlb_add_page, }; -static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, +static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *arg); struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) @@ -319,11 +341,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) return &pagetable->base; } 
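[Editor's note] The msm_iommu_pagetable_walk() helper added above returns one page-table descriptor per translation level for the faulting IOVA, which the devcoredump path stores into msm_gpu_fault_info::ptes. A minimal sketch of how a caller consumes it follows; dump_fault_pgtbl() and the pr_info() formatting are invented here purely for illustration and are not part of the patch.

/* Editorial sketch (not from the patch): consuming msm_iommu_pagetable_walk().
 * Only msm_iommu_pagetable_walk() and struct msm_gpu_fault_info come from the
 * changes above; this caller is hypothetical.
 */
static void dump_fault_pgtbl(struct msm_mmu *mmu, struct msm_gpu_fault_info *info)
{
	int i;

	/* Fails for non-GPU pagetables or if the io-pgtable lacks a walk op: */
	if (msm_iommu_pagetable_walk(mmu, info->iova, info->ptes))
		return;

	/* One descriptor per translation level visited for the faulting IOVA: */
	for (i = 0; i < 4; i++)
		pr_info("pte[%d]: %016llx\n", i, info->ptes[i]);
}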
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, +static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *arg) { struct msm_iommu *iommu = arg; - struct msm_mmu *mmu = &iommu->base; struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev); struct adreno_smmu_fault_info info, *ptr = NULL; @@ -337,18 +358,26 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags); - if (mmu->funcs->resume_translation) - mmu->funcs->resume_translation(mmu); - return 0; } -static void msm_iommu_resume_translation(struct msm_mmu *mmu) +static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags, void *arg) +{ + struct msm_iommu *iommu = arg; + + if (iommu->base.handler) + return iommu->base.handler(iommu->base.arg, iova, flags, NULL); + + return -ENOSYS; +} + +static void msm_iommu_set_stall(struct msm_mmu *mmu, bool enable) { struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev); - if (adreno_smmu->resume_translation) - adreno_smmu->resume_translation(adreno_smmu->cookie, true); + if (adreno_smmu->set_stall) + adreno_smmu->set_stall(adreno_smmu->cookie, enable); } static void msm_iommu_detach(struct msm_mmu *mmu) @@ -398,7 +427,7 @@ static const struct msm_mmu_funcs funcs = { .map = msm_iommu_map, .unmap = msm_iommu_unmap, .destroy = msm_iommu_destroy, - .resume_translation = msm_iommu_resume_translation, + .set_stall = msm_iommu_set_stall, }; struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks) @@ -437,6 +466,21 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks) return &iommu->base; } +struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks) +{ + struct msm_iommu *iommu; + struct msm_mmu *mmu; + + mmu = msm_iommu_new(dev, quirks); + if (IS_ERR_OR_NULL(mmu)) + return mmu; + + iommu = to_msm_iommu(mmu); + iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu); + + return mmu; +} + struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks) { struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev); @@ -448,7 +492,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig return mmu; iommu = to_msm_iommu(mmu); - iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu); + iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu); /* Enable stall on iommu fault: */ if (adreno_smmu->set_stall) diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c index 38965e12a6bf..35d5397e73b4 100644 --- a/drivers/gpu/drm/msm/msm_kms.c +++ b/drivers/gpu/drm/msm/msm_kms.c @@ -164,12 +164,26 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc) vblank_ctrl_queue_work(priv, crtc, false); } +static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void *data) +{ + struct msm_kms *kms = arg; + + if (atomic_read(&kms->fault_snapshot_capture) == 0) { + msm_disp_snapshot_state(kms->dev); + atomic_inc(&kms->fault_snapshot_capture); + } + + return -ENOSYS; +} + struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev) { struct msm_gem_address_space *aspace; struct msm_mmu *mmu; struct device *mdp_dev = dev->dev; struct device *mdss_dev = mdp_dev->parent; + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; struct device 
*iommu_dev; /* @@ -181,7 +195,7 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev) else iommu_dev = mdss_dev; - mmu = msm_iommu_new(iommu_dev, 0); + mmu = msm_iommu_disp_new(iommu_dev, 0); if (IS_ERR(mmu)) return ERR_CAST(mmu); @@ -195,8 +209,11 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev) if (IS_ERR(aspace)) { dev_err(mdp_dev, "aspace create, error %pe\n", aspace); mmu->funcs->destroy(mmu); + return aspace; } + msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler); + return aspace; } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index e60162744c66..43b58d052ee6 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -60,6 +60,13 @@ struct msm_kms_funcs { void (*disable_commit)(struct msm_kms *kms); /** + * @check_mode_changed: + * + * Verify whether the commit requires a full modeset on any of the CRTCs. + */ + int (*check_mode_changed)(struct msm_kms *kms, struct drm_atomic_state *state); + + /** * Prepare for atomic commit. This is called after any previous * (async or otherwise) commit has completed. */ @@ -128,6 +135,9 @@ struct msm_kms { int irq; bool irq_requested; + /* rate limit the snapshot capture to once per atomic commit */ + atomic_t fault_snapshot_capture; + /* mapper-id used to request GEM buffer mapped for scanout: */ struct msm_gem_address_space *aspace; diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index dcb49fd30402..709979fcfab6 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -150,7 +150,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss) dev = msm_mdss->dev; - domain = irq_domain_add_linear(dev->of_node, 32, + domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32, &msm_mdss_irqdomain_ops, msm_mdss); if (!domain) { dev_err(dev, "failed to add irq_domain\n"); @@ -592,6 +592,16 @@ static const struct msm_mdss_data sa8775p_data = { .reg_bus_bw = 74000, }; +static const struct msm_mdss_data sar2130p_data = { + .ubwc_enc_version = UBWC_3_0, /* 4.0.2 in hw */ + .ubwc_dec_version = UBWC_4_3, + .ubwc_swizzle = 6, + .ubwc_bank_spread = true, + .highest_bank_bit = 0, + .macrotile_mode = 1, + .reg_bus_bw = 74000, +}; + static const struct msm_mdss_data sc7180_data = { .ubwc_enc_version = UBWC_2_0, .ubwc_dec_version = UBWC_2_0, @@ -738,6 +748,7 @@ static const struct of_device_id mdss_dt_match[] = { { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data }, { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data }, { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data }, + { .compatible = "qcom,sar2130p-mdss", .data = &sar2130p_data }, { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data }, { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data }, { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data }, diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 88af4f490881..0c694907140d 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -15,7 +15,7 @@ struct msm_mmu_funcs { size_t len, int prot); int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len); void (*destroy)(struct msm_mmu *mmu); - void (*resume_translation)(struct msm_mmu *mmu); + void (*set_stall)(struct msm_mmu *mmu, bool enable); }; enum msm_mmu_type { @@ -42,6 +42,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks); struct msm_mmu
*msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks); +struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks); static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, int (*handler)(void *arg, unsigned long iova, int flags, void *data)) @@ -53,7 +54,8 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent); int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr, - int *asid); + int *asid); +int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]); struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu); #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index c803556a8f64..89dce15eed3b 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -59,8 +59,14 @@ static const struct drm_sched_backend_ops msm_sched_ops = { struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, void *memptrs, uint64_t memptrs_iova) { + struct drm_sched_init_args args = { + .ops = &msm_sched_ops, + .num_rqs = DRM_SCHED_PRIORITY_COUNT, + .credit_limit = num_hw_submissions, + .timeout = MAX_SCHEDULE_TIMEOUT, + .dev = gpu->dev->dev, + }; struct msm_ringbuffer *ring; - long sched_timeout; char name[32]; int ret; @@ -87,6 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, } msm_gem_object_set_name(ring->bo, "ring%d", id); + args.name = to_msm_bo(ring->bo)->name; ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2); ring->next = ring->start; @@ -95,13 +102,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ring->memptrs = memptrs; ring->memptrs_iova = memptrs_iova; - /* currently managing hangcheck ourselves: */ - sched_timeout = MAX_SCHEDULE_TIMEOUT; - - ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - num_hw_submissions, 0, sched_timeout, - NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev); + ret = drm_sched_init(&ring->sched, &args); if (ret) { goto fail; } diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml index 55a35182858c..462713401622 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -2255,7 +2255,15 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) <reg32 offset="0" name="0"> <bitfield name="CLEAR_ON_CHIP_TS" pos="0" type="boolean"/> <bitfield name="CLEAR_RESOURCE_TABLE" pos="1" type="boolean"/> - <bitfield name="CLEAR_GLOBAL_LOCAL_TS" pos="2" type="boolean"/> + <bitfield name="CLEAR_BV_BR_COUNTER" pos="2" type="boolean"/> + <bitfield name="RESET_GLOBAL_LOCAL_TS" pos="3" type="boolean"/> + </reg32> +</domain> + +<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-"> + <reg64 offset="0" name="IB_BASE" type="address"/> + <reg32 offset="2" name="2"> + <bitfield name="IB_SIZE" low="0" high="19"/> </reg32> </domain> diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml index d54b72f92449..d2c8c46bb041 100644 --- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml +++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml @@ -9,8 +9,16 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <reg32 offset="0x00004" 
name="REVISION_ID1"/> <reg32 offset="0x00008" name="REVISION_ID2"/> <reg32 offset="0x0000c" name="REVISION_ID3"/> - <reg32 offset="0x00010" name="CLK_CFG0"/> - <reg32 offset="0x00014" name="CLK_CFG1"/> + <reg32 offset="0x00010" name="CLK_CFG0"> + <bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/> + <bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/> + </reg32> + <reg32 offset="0x00014" name="CLK_CFG1"> + <bitfield name="CLK_EN" pos="5" type="boolean"/> + <bitfield name="CLK_EN_SEL" pos="4" type="boolean"/> + <bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/> + <bitfield name="DSICLK_SEL" low="0" high="1" type="uint"/> + </reg32> <reg32 offset="0x00018" name="GLBL_CTRL"/> <reg32 offset="0x0001c" name="RBUF_CTRL"/> <reg32 offset="0x00020" name="VREG_CTRL_0"/> diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml index 1cf1b14fbd91..0ebb96297dae 100644 --- a/drivers/gpu/drm/msm/registers/display/hdmi.xml +++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml @@ -131,7 +131,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> --> <bitfield name="GENERIC0_SEND" pos="0" type="boolean"/> <bitfield name="GENERIC0_CONT" pos="1" type="boolean"/> - <bitfield name="GENERIC0_UPDATE" low="2" high="3" type="uint"/> <!-- ??? --> + <bitfield name="GENERIC0_UPDATE" pos="2" type="boolean"/> <bitfield name="GENERIC1_SEND" pos="4" type="boolean"/> <bitfield name="GENERIC1_CONT" pos="5" type="boolean"/> <bitfield name="GENERIC0_LINE" low="16" high="21" type="uint"/> diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py index 3926485bb197..a409404627c7 100644 --- a/drivers/gpu/drm/msm/registers/gen_header.py +++ b/drivers/gpu/drm/msm/registers/gen_header.py @@ -11,6 +11,7 @@ import collections import argparse import time import datetime +import re class Error(Exception): def __init__(self, message): @@ -877,13 +878,14 @@ The rules-ng-ng source files this header was generated from are: """) maxlen = 0 for filepath in p.xml_files: - maxlen = max(maxlen, len(filepath)) + new_filepath = re.sub("^.+drivers","drivers",filepath) + maxlen = max(maxlen, len(new_filepath)) for filepath in p.xml_files: - pad = " " * (maxlen - len(filepath)) + pad = " " * (maxlen - len(new_filepath)) filesize = str(os.path.getsize(filepath)) filesize = " " * (7 - len(filesize)) + filesize filetime = time.ctime(os.path.getmtime(filepath)) - print("- " + filepath + pad + " (" + filesize + " bytes, from " + filetime + ")") + print("- " + new_filepath + pad + " (" + filesize + " bytes, from <stripped>)") if p.copyright_year: current_year = str(datetime.date.today().year) print() |