author		Dave Airlie <airlied@redhat.com>	2021-08-17 03:45:26 +0300
committer	Dave Airlie <airlied@redhat.com>	2021-08-17 03:53:52 +0300
commit		397ab98e2d69cede84444a28eab77a171983d14e (patch)
tree		2a85d9ac5b32b5d86a8d90890532cf34cbeec15e /drivers/gpu/drm/msm
parent		f97a1b658052640e18d14dcefcb90f01ae9e0d95 (diff)
parent		cb0927ab80d224c9074f53d1a55b087d12ec5a85 (diff)
download	linux-397ab98e2d69cede84444a28eab77a171983d14e.tar.xz
Merge tag 'drm-msm-next-2021-08-12' of https://gitlab.freedesktop.org/drm/msm into drm-next
This is the main pull for v5.15, after the early pull request with
drm/scheduler conversion:
* New a6xx GPU support: a680 and 7c3
* dsi: 7nm phy, sc7280 support, test pattern generator support
* mdp4 fixes for older hw like the nexus7
* displayport fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGs_tyanTeDGMH1X+Uf4wdyy7jYj-CinGXXVETiYOESahw@mail.gmail.com
Diffstat (limited to 'drivers/gpu/drm/msm')
44 files changed, 1102 insertions, 585 deletions
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index dc7f3e40850b..e9c6af78b1d7 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -116,9 +116,9 @@ config DRM_MSM_DSI_10NM_PHY
 	  Choose this option if DSI PHY on SDM845 is used on the platform.
 
 config DRM_MSM_DSI_7NM_PHY
-	bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)"
+	bool "Enable DSI 7nm PHY driver in MSM DRM"
 	depends on DRM_MSM_DSI
 	default y
 	help
-	  Choose this option if DSI PHY on SM8150/SM8250 is used on the
-	  platform.
+	  Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on
+	  the platform.
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 0a93ed1d6b06..5e2750eb3810 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -18,6 +18,18 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+	if (a5xx_gpu->has_whereami) {
+		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+		OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+		OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+	}
+}
+
 void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 		bool sync)
 {
@@ -30,11 +42,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 	 * Most flush operations need to issue a WHERE_AM_I opcode to sync up
 	 * the rptr shadow
 	 */
-	if (a5xx_gpu->has_whereami && sync) {
-		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
-		OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
-		OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
-	}
+	if (sync)
+		update_shadow_rptr(gpu, ring);
 
 	spin_lock_irqsave(&ring->preempt_lock, flags);
 
@@ -168,6 +177,16 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			ibs++;
 			break;
 		}
+
+		/*
+		 * Periodically update shadow-wptr if needed, so that we
+		 * can see partial progress of submits with large # of
+		 * cmds.. otherwise we could needlessly stall waiting for
+		 * ringbuffer state, simply due to looking at a shadow
+		 * rptr value that has not been updated
+		 */
+		if ((ibs % 32) == 0)
+			update_shadow_rptr(gpu, ring);
 	}
 
 	/*
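The hunk above moves the WHERE_AM_I emission into a helper so it can also be called on a fixed cadence from a5xx_submit(). A minimal standalone sketch of that cadence (plain userspace C with illustrative names, not driver code):

	#include <stdio.h>

	static int shadow_updates;

	static void update_shadow_rptr(void)
	{
		/* stands in for emitting the CP_WHERE_AM_I packet */
		shadow_updates++;
	}

	int main(void)
	{
		unsigned int ibs = 0;

		for (int i = 0; i < 100; i++) {	/* a submit with 100 IBs */
			ibs++;			/* one IB emitted */
			if ((ibs % 32) == 0)	/* every 32nd IB, publish progress */
				update_shadow_rptr();
		}
		printf("%d periodic shadow updates for 100 IBs\n", shadow_updates);
		return 0;
	}

The point of the modulo cadence is that a very large submit still lets the CPU observe partial ringbuffer progress instead of stalling on a stale shadow rptr.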
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d7cec7f0dde0..a7c58018959f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -519,9 +519,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	if (!pdcptr)
 		goto err;
 
-	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
 		pdc_in_aop = true;
-	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
 		pdc_address_offset = 0x30090;
 	else
 		pdc_address_offset = 0x30080;
@@ -933,6 +933,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 
 	/* Use a known rate to bring up the GMU */
 	clk_set_rate(gmu->core_clk, 200000000);
+	clk_set_rate(gmu->hub_clk, 150000000);
 	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
 	if (ret) {
 		pm_runtime_put(gmu->gxpd);
@@ -1393,6 +1394,9 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
 	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
 		gmu->nr_clocks, "gmu");
 
+	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
+		gmu->nr_clocks, "hub");
+
 	return 0;
 }
 
@@ -1504,7 +1508,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	 * are otherwise unused by a660.
 	 */
 	gmu->dummy.size = SZ_4K;
-	if (adreno_is_a660(adreno_gpu)) {
+	if (adreno_is_a660_family(adreno_gpu)) {
 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
 					    0x60400000);
 		if (ret)
 			goto err_memory;
@@ -1522,7 +1526,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 					    SZ_16M - SZ_16K, 0x04000);
 		if (ret)
 			goto err_memory;
-	} else if (adreno_is_a640(adreno_gpu)) {
+	} else if (adreno_is_a640_family(adreno_gpu)) {
 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
 					    SZ_256K - SZ_16K, 0x04000);
 		if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 71dfa60070cc..3c74f64e3126 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -66,6 +66,7 @@ struct a6xx_gmu {
 	int nr_clocks;
 	struct clk_bulk_data *clocks;
 	struct clk *core_clk;
+	struct clk *hub_clk;
 
 	/* current performance index set externally */
 	int current_perf_index;
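a6xx_gmu_resume() can only set the new hub clock rate because the probe path first fishes the named clock out of the bulk array (the real helper is msm_clk_bulk_get_clock()). A self-contained model of that by-name lookup, with simplified stand-in types:

	#include <stdio.h>
	#include <string.h>

	struct clk { const char *id; };			/* stand-in type */
	struct clk_bulk_data { const char *id; struct clk *clk; };

	static struct clk *bulk_get_clock(struct clk_bulk_data *bulk, int count,
					  const char *name)
	{
		/* linear scan of the bulk array for a matching connection id */
		for (int i = 0; i < count; i++)
			if (!strcmp(bulk[i].id, name))
				return bulk[i].clk;
		return NULL;
	}

	int main(void)
	{
		struct clk gmu = { "gmu" }, hub = { "hub" };
		struct clk_bulk_data clocks[] = { { "gmu", &gmu }, { "hub", &hub } };
		struct clk *hub_clk = bulk_get_clock(clocks, 2, "hub");

		printf("hub clock %sfound\n", hub_clk ? "" : "not ");
		return 0;
	}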
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 989301230f14..40c9fef457a4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -52,21 +52,25 @@ static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 	return true;
 }
 
-static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-	uint32_t wptr;
-	unsigned long flags;
 
 	/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
 	if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
-		struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-
 		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
 		OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
 		OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
 	}
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	uint32_t wptr;
+	unsigned long flags;
+
+	update_shadow_rptr(gpu, ring);
 
 	spin_lock_irqsave(&ring->preempt_lock, flags);
 
@@ -145,7 +149,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	struct msm_ringbuffer *ring = submit->ring;
-	unsigned int i;
+	unsigned int i, ibs = 0;
 
 	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
 
@@ -181,8 +185,19 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
 			OUT_RING(ring, submit->cmd[i].size);
+			ibs++;
 			break;
 		}
+
+		/*
+		 * Periodically update shadow-wptr if needed, so that we
+		 * can see partial progress of submits with large # of
+		 * cmds.. otherwise we could needlessly stall waiting for
+		 * ringbuffer state, simply due to looking at a shadow
+		 * rptr value that has not been updated
+		 */
+		if ((ibs % 32) == 0)
+			update_shadow_rptr(gpu, ring);
 	}
 
 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -652,7 +667,7 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
 		regs = a650_protect;
 		count = ARRAY_SIZE(a650_protect);
 		count_max = 48;
-	} else if (adreno_is_a660(adreno_gpu)) {
+	} else if (adreno_is_a660_family(adreno_gpu)) {
 		regs = a660_protect;
 		count = ARRAY_SIZE(a660_protect);
 		count_max = 48;
@@ -683,7 +698,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 	if (adreno_is_a618(adreno_gpu))
 		return;
 
-	if (adreno_is_a640(adreno_gpu))
+	if (adreno_is_a640_family(adreno_gpu))
 		amsbc = 1;
 
 	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
@@ -694,6 +709,13 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 		uavflagprd_inv = 2;
 	}
 
+	if (adreno_is_7c3(adreno_gpu)) {
+		lower_bit = 1;
+		amsbc = 1;
+		rgb565_predicator = 1;
+		uavflagprd_inv = 2;
+	}
+
 	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
 		rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
 	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
@@ -740,6 +762,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 {
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct msm_gpu *gpu = &adreno_gpu->base;
+	const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
 	u32 *buf = msm_gem_get_vaddr(obj);
 	bool ret = false;
 
@@ -756,8 +779,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 	 *
 	 * a660 targets have all the critical security fixes from the start
 	 */
-	if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
-		adreno_is_a640(adreno_gpu)) {
+	if (!strcmp(sqe_name, "a630_sqe.fw")) {
 		/*
 		 * If the lowest nibble is 0xa that is an indication that this
 		 * microcode has been patched. The actual version is in dword
@@ -778,7 +800,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 		DRM_DEV_ERROR(&gpu->pdev->dev,
 			"a630 SQE ucode is too old. Have version %x need at least %x\n",
 			buf[0] & 0xfff, 0x190);
-	} else if (adreno_is_a650(adreno_gpu)) {
+	} else if (!strcmp(sqe_name, "a650_sqe.fw")) {
 		if ((buf[0] & 0xfff) >= 0x095) {
 			ret = true;
 			goto out;
@@ -787,7 +809,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 		DRM_DEV_ERROR(&gpu->pdev->dev,
 			"a650 SQE ucode is too old. Have version %x need at least %x\n",
 			buf[0] & 0xfff, 0x095);
-	} else if (adreno_is_a660(adreno_gpu)) {
+	} else if (!strcmp(sqe_name, "a660_sqe.fw")) {
 		ret = true;
 	} else {
 		DRM_DEV_ERROR(&gpu->pdev->dev,
@@ -897,7 +919,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	a6xx_set_hwcg(gpu, true);
 
 	/* VBIF/GBIF start*/
-	if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
+	if (adreno_is_a640_family(adreno_gpu) ||
+	    adreno_is_a650_family(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
@@ -935,13 +958,14 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
 	gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
 
-	if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
+	if (adreno_is_a640_family(adreno_gpu) ||
+	    adreno_is_a650_family(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
 	else
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
 	gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
 
-	if (adreno_is_a660(adreno_gpu))
+	if (adreno_is_a660_family(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
 
 	/* Setting the mem pool size */
@@ -952,8 +976,10 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	 */
 	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
-	else if (adreno_is_a640(adreno_gpu))
+	else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
+	else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
 	else
 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
 
@@ -990,13 +1016,15 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Protect registers from the CP */
 	a6xx_set_cp_protect(gpu);
 
-	if (adreno_is_a660(adreno_gpu)) {
+	if (adreno_is_a660_family(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
 		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
-		/* Set dualQ + disable afull for A660 GPU but not for A635 */
-		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
 	}
 
+	/* Set dualQ + disable afull for A660 GPU */
+	if (adreno_is_a660(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+
 	/* Enable expanded apriv for targets that support it */
 	if (gpu->hw_apriv) {
 		gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
@@ -1383,13 +1411,13 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 {
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct msm_gpu *gpu = &adreno_gpu->base;
-	u32 cntl1_regval = 0;
+	u32 gpu_scid, cntl1_regval = 0;
 
 	if (IS_ERR(a6xx_gpu->llc_mmio))
 		return;
 
 	if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
-		u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+		gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
 
 		gpu_scid &= 0x1f;
 		cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
@@ -1409,26 +1437,34 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 		}
 	}
 
-	if (cntl1_regval) {
+	if (!cntl1_regval)
+		return;
+
+	/*
+	 * Program the slice IDs for the various GPU blocks and GPU MMU
+	 * pagetables
+	 */
+	if (!a6xx_gpu->have_mmu500) {
+		a6xx_llc_write(a6xx_gpu,
+			REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
+
 		/*
-		 * Program the slice IDs for the various GPU blocks and GPU MMU
-		 * pagetables
+		 * Program cacheability overrides to not allocate cache
+		 * lines on a write miss
 		 */
-		if (a6xx_gpu->have_mmu500)
-			gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0),
-				cntl1_regval);
-		else {
-			a6xx_llc_write(a6xx_gpu,
-				REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
-
-			/*
-			 * Program cacheability overrides to not allocate cache
-			 * lines on a write miss
-			 */
-			a6xx_llc_rmw(a6xx_gpu,
-				REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
-		}
+		a6xx_llc_rmw(a6xx_gpu,
+			REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
+		return;
 	}
+
+	gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
+
+	/* On A660, the SCID programming for UCHE traffic is done in
+	 * A6XX_GBIF_SCACHE_CNTL0[14:10]
+	 */
+	if (adreno_is_a660_family(adreno_gpu))
+		gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+			(1 << 8), (gpu_scid << 10) | (1 << 8));
 }
 
 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1667,11 +1703,11 @@ static u32 a618_get_speed_bin(u32 fuse)
 	return UINT_MAX;
 }
 
-static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
 {
 	u32 val = UINT_MAX;
 
-	if (revn == 618)
+	if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
 		val = a618_get_speed_bin(fuse);
 
 	if (val == UINT_MAX) {
@@ -1684,14 +1720,13 @@ static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
 	return (1 << val);
 }
 
-static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
-		u32 revn)
+static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
 {
 	u32 supp_hw = UINT_MAX;
-	u16 speedbin;
+	u32 speedbin;
 	int ret;
 
-	ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
+	ret = nvmem_cell_read_variable_le_u32(dev, "speed_bin", &speedbin);
 	/*
 	 * -ENOENT means that the platform doesn't support speedbin which is
 	 * fine
@@ -1704,9 +1739,8 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
 			ret);
 		goto done;
 	}
-	speedbin = le16_to_cpu(speedbin);
 
-	supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
+	supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
 
 done:
 	ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
@@ -1772,12 +1806,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	 */
 	info = adreno_info(config->rev);
 
-	if (info && (info->revn == 650 || info->revn == 660))
+	if (info && (info->revn == 650 || info->revn == 660 ||
+			adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
 		adreno_gpu->base.hw_apriv = true;
 
 	a6xx_llc_slices_init(pdev, a6xx_gpu);
 
-	ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+	ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
 	if (ret) {
 		a6xx_destroy(&(a6xx_gpu->base.base));
 		return ERR_PTR(ret);
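For reference, a userspace sketch of the speed-bin flow above: a fuse value read from nvmem is translated to a bin, and the OPP supported-hw mask gets that single bit set, with all-bits as the permissive fallback. The fuse-to-bin table here is illustrative, not the real a618 one:

	#include <stdint.h>
	#include <stdio.h>

	#define VAL_UNKNOWN 0xffffffffu

	/* illustrative fuse->bin mapping, not the driver's a618 table */
	static uint32_t demo_get_speed_bin(uint32_t fuse)
	{
		if (fuse == 0)
			return 0;
		if (fuse == 169)
			return 1;
		return VAL_UNKNOWN;	/* unrecognized fuse value */
	}

	static uint32_t fuse_to_supp_hw(uint32_t fuse)
	{
		uint32_t val = demo_get_speed_bin(fuse);

		if (val == VAL_UNKNOWN)
			return VAL_UNKNOWN;	/* allow every OPP when unknown */
		return 1u << val;		/* only OPPs tagged with this bin */
	}

	int main(void)
	{
		printf("supported-hw mask for fuse 169: %#x\n", fuse_to_supp_hw(169));
		return 0;
	}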
the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x5006c; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) { /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ @@ -428,10 +458,12 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) if (adreno_is_a618(adreno_gpu)) a618_build_bw_table(&msg); - else if (adreno_is_a640(adreno_gpu)) + else if (adreno_is_a640_family(adreno_gpu)) a640_build_bw_table(&msg); else if (adreno_is_a650(adreno_gpu)) a650_build_bw_table(&msg); + else if (adreno_is_7c3(adreno_gpu)) + adreno_7c3_build_bw_table(&msg); else if (adreno_is_a660(adreno_gpu)) a660_build_bw_table(&msg); else diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 6dad8015c9a1..2a6ce76656aa 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -8,8 +8,6 @@ #include "adreno_gpu.h" -#define ANY_ID 0xff - bool hang_debug = false; MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); module_param_named(hang_debug, hang_debug, bool, 0600); @@ -300,6 +298,30 @@ static const struct adreno_info gpulist[] = { .init = a6xx_gpu_init, .zapfw = "a660_zap.mdt", .hwcg = a660_hwcg, + }, { + .rev = ADRENO_REV(6, 3, 5, ANY_ID), + .name = "Adreno 7c Gen 3", + .fw = { + [ADRENO_FW_SQE] = "a660_sqe.fw", + [ADRENO_FW_GMU] = "a660_gmu.bin", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .hwcg = a660_hwcg, + }, { + .rev = ADRENO_REV(6, 8, 0, ANY_ID), + .revn = 680, + .name = "A680", + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a640_gmu.bin", + }, + .gmem = SZ_2M, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a640_zap.mdt", + .hwcg = a640_hwcg, }, }; @@ -325,6 +347,15 @@ static inline bool _rev_match(uint8_t entry, uint8_t id) return (entry == ANY_ID) || (entry == id); } +bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2) +{ + + return _rev_match(rev1.core, rev2.core) && + _rev_match(rev1.major, rev2.major) && + _rev_match(rev1.minor, rev2.minor) && + _rev_match(rev1.patchid, rev2.patchid); +} + const struct adreno_info *adreno_info(struct adreno_rev rev) { int i; @@ -332,10 +363,7 @@ const struct adreno_info *adreno_info(struct adreno_rev rev) /* identify gpu: */ for (i = 0; i < ARRAY_SIZE(gpulist); i++) { const struct adreno_info *info = &gpulist[i]; - if (_rev_match(info->rev.core, rev.core) && - _rev_match(info->rev.major, rev.major) && - _rev_match(info->rev.minor, rev.minor) && - _rev_match(info->rev.patchid, rev.patchid)) + if (adreno_cmp_rev(info->rev, rev)) return info; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 8dbe0d157520..225c277a6223 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -42,6 +42,8 @@ struct adreno_rev { uint8_t patchid; }; +#define ANY_ID 0xff + #define ADRENO_REV(core, major, minor, patchid) \ ((struct adreno_rev){ core, major, minor, patchid }) @@ -141,6 +143,8 @@ struct adreno_platform_config { __ret; \ }) +bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2); + static inline bool adreno_is_a2xx(struct adreno_gpu *gpu) { return (gpu->revn < 300); @@ -237,9 +241,9 @@ static inline int 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 8dbe0d157520..225c277a6223 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -42,6 +42,8 @@ struct adreno_rev {
 	uint8_t patchid;
 };
 
+#define ANY_ID 0xff
+
 #define ADRENO_REV(core, major, minor, patchid) \
 	((struct adreno_rev){ core, major, minor, patchid })
 
@@ -141,6 +143,8 @@ struct adreno_platform_config {
 	__ret;						\
 })
 
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
+
 static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
 {
 	return (gpu->revn < 300);
@@ -237,9 +241,9 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
 	return gpu->revn == 630;
 }
 
-static inline int adreno_is_a640(struct adreno_gpu *gpu)
+static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
 {
-	return gpu->revn == 640;
+	return (gpu->revn == 640) || (gpu->revn == 680);
 }
 
 static inline int adreno_is_a650(struct adreno_gpu *gpu)
@@ -247,15 +251,27 @@ static inline int adreno_is_a650(struct adreno_gpu *gpu)
 	return gpu->revn == 650;
 }
 
+static inline int adreno_is_7c3(struct adreno_gpu *gpu)
+{
+	/* The order of args is important here to handle ANY_ID correctly */
+	return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
+}
+
 static inline int adreno_is_a660(struct adreno_gpu *gpu)
 {
 	return gpu->revn == 660;
}

+static inline int adreno_is_a660_family(struct adreno_gpu *gpu)
+{
+	return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
+}
+
 /* check for a650, a660, or any derivatives */
 static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
 {
-	return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660;
+	return gpu->revn == 650 || gpu->revn == 620 ||
+		adreno_is_a660_family(gpu);
 }
 
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 9a5c70c87cc8..768012243b44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -30,12 +30,6 @@
 #include "dpu_core_perf.h"
 #include "dpu_trace.h"
 
-#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
-#define DPU_DRM_BLEND_OP_OPAQUE         1
-#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
-#define DPU_DRM_BLEND_OP_COVERAGE       3
-#define DPU_DRM_BLEND_OP_MAX            4
-
 /* layer mixer index on dpu_crtc */
 #define LEFT_MIXER 0
 #define RIGHT_MIXER 1
@@ -146,20 +140,43 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
 {
 	struct dpu_hw_mixer *lm = mixer->hw_lm;
 	uint32_t blend_op;
+	uint32_t fg_alpha, bg_alpha;
 
-	/* default to opaque blending */
-	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
-		DPU_BLEND_BG_ALPHA_BG_CONST;
+	fg_alpha = pstate->base.alpha >> 8;
+	bg_alpha = 0xff - fg_alpha;
 
-	if (format->alpha_enable) {
+	/* default to opaque blending */
+	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
+	    !format->alpha_enable) {
+		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+			DPU_BLEND_BG_ALPHA_BG_CONST;
+	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+			DPU_BLEND_BG_ALPHA_FG_PIXEL;
+		if (fg_alpha != 0xff) {
+			bg_alpha = fg_alpha;
+			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
+				    DPU_BLEND_BG_INV_MOD_ALPHA;
+		} else {
+			blend_op |= DPU_BLEND_BG_INV_ALPHA;
+		}
+	} else {
 		/* coverage blending */
 		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
-			DPU_BLEND_BG_ALPHA_FG_PIXEL |
-			DPU_BLEND_BG_INV_ALPHA;
+			DPU_BLEND_BG_ALPHA_FG_PIXEL;
+		if (fg_alpha != 0xff) {
+			bg_alpha = fg_alpha;
+			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
+				    DPU_BLEND_FG_INV_MOD_ALPHA |
+				    DPU_BLEND_BG_MOD_ALPHA |
+				    DPU_BLEND_BG_INV_MOD_ALPHA;
+		} else {
+			blend_op |= DPU_BLEND_BG_INV_ALPHA;
+		}
 	}
 
 	lm->ops.setup_blend_config(lm, pstate->stage,
-			0xFF, 0, blend_op);
+			fg_alpha, bg_alpha, blend_op);
 
 	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
 		&format->base.pixel_format, format->alpha_enable, blend_op);
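A small sketch of the per-plane alpha math introduced above: the 16-bit DRM plane alpha is scaled down to the 8-bit foreground constant the mixer takes, and the background constant is its complement:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t plane_alpha = 0x8000;		/* ~50% from userspace */
		uint8_t fg_alpha = plane_alpha >> 8;	/* 0x80 */
		uint8_t bg_alpha = 0xff - fg_alpha;	/* 0x7f */

		printf("fg=%#x bg=%#x\n", fg_alpha, bg_alpha);
		return 0;
	}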
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 1c04b7cce43e..0e9d3fa1544b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -274,20 +274,20 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 
 	/* return EWOULDBLOCK since we know the wait isn't necessary */
 	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
-		DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d",
+		DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n",
			  DRMID(phys_enc->parent), intr_idx,
			  irq->irq_idx);
 		return -EWOULDBLOCK;
 	}
 
 	if (irq->irq_idx < 0) {
-		DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s",
+		DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n",
			      DRMID(phys_enc->parent), intr_idx,
			      irq->name);
 		return 0;
 	}
 
-	DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d",
+	DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), intr_idx,
		      irq->irq_idx,
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));
@@ -303,8 +303,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 		if (irq_status) {
 			unsigned long flags;
 
-			DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
-				      "irq=%d, pp=%d, atomic_cnt=%d",
+			DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), intr_idx,
				      irq->irq_idx,
				      phys_enc->hw_pp->idx - PINGPONG_0,
@@ -315,8 +314,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
			ret = 0;
 		} else {
 			ret = -ETIMEDOUT;
-			DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
-				      "irq=%d, pp=%d, atomic_cnt=%d",
+			DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
				      DRMID(phys_enc->parent), intr_idx,
				      irq->irq_idx,
				      phys_enc->hw_pp->idx - PINGPONG_0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index d01c4c919504..2e482cdd7b3c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -974,6 +974,7 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
 	.amortizable_threshold = 25,
 	.min_prefill_lines = 24,
 	.danger_lut_tbl = {0xf, 0xffff, 0x0},
+	.safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
 	.qos_lut_tbl = {
 		{.nentry = ARRAY_SIZE(sdm845_qos_linear),
 		.entries = sdm845_qos_linear
@@ -1001,6 +1002,7 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
 	.min_dram_ib = 1600000,
 	.min_prefill_lines = 24,
 	.danger_lut_tbl = {0xff, 0xffff, 0x0},
+	.safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
 	.qos_lut_tbl = {
 		{.nentry = ARRAY_SIZE(sc7180_qos_linear),
 		.entries = sc7180_qos_linear
@@ -1028,6 +1030,7 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
 	.min_dram_ib = 800000,
 	.min_prefill_lines = 24,
 	.danger_lut_tbl = {0xf, 0xffff, 0x0},
+	.safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
 	.qos_lut_tbl = {
 		{.nentry = ARRAY_SIZE(sm8150_qos_linear),
 		.entries = sm8150_qos_linear
@@ -1056,6 +1059,7 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
 	.min_dram_ib = 800000,
 	.min_prefill_lines = 35,
 	.danger_lut_tbl = {0xf, 0xffff, 0x0},
+	.safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
 	.qos_lut_tbl = {
 		{.nentry = ARRAY_SIZE(sc7180_qos_linear),
 		.entries = sc7180_qos_linear
@@ -1084,6 +1088,7 @@ static const struct dpu_perf_cfg sc7280_perf_data = {
 	.min_dram_ib = 1600000,
 	.min_prefill_lines = 24,
 	.danger_lut_tbl = {0xffff, 0xffff, 0x0},
+	.safe_lut_tbl = {0xff00, 0xff00, 0xffff},
 	.qos_lut_tbl = {
 		{.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
 		.entries = sc7180_qos_macrotile
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index f8a74f6cdc4c..64740ddb983e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -345,10 +345,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
 	int i;
 
 	for (i = 0; i < ctx->mixer_count; i++) {
-		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
-		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
-		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
-		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
+
+		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
 	}
 
 	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 4fd913522931..ae48f41821cf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -471,30 +471,68 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				    struct dpu_kms *dpu_kms)
 {
 	struct drm_encoder *encoder = NULL;
+	struct msm_display_info info;
 	int i, rc = 0;
 
 	if (!(priv->dsi[0] || priv->dsi[1]))
 		return rc;
 
-	/*TODO: Support two independent DSI connectors */
-	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
-	if (IS_ERR(encoder)) {
-		DPU_ERROR("encoder init failed for dsi display\n");
-		return PTR_ERR(encoder);
-	}
-
-	priv->encoders[priv->num_encoders++] = encoder;
-
+	/*
+	 * We support following confiurations:
+	 * - Single DSI host (dsi0 or dsi1)
+	 * - Two independent DSI hosts
+	 * - Bonded DSI0 and DSI1 hosts
+	 *
+	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
+	 */
 	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+		int other = (i + 1) % 2;
+
 		if (!priv->dsi[i])
 			continue;
 
+		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
+		    !msm_dsi_is_master_dsi(priv->dsi[i]))
+			continue;
+
+		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
+		if (IS_ERR(encoder)) {
+			DPU_ERROR("encoder init failed for dsi display\n");
+			return PTR_ERR(encoder);
+		}
+
+		priv->encoders[priv->num_encoders++] = encoder;
+
+		memset(&info, 0, sizeof(info));
+		info.intf_type = encoder->encoder_type;
+
 		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
 		if (rc) {
 			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
 			break;
 		}
+
+		info.h_tile_instance[info.num_of_h_tiles++] = i;
+		info.capabilities = msm_dsi_is_cmd_mode(priv->dsi[i]) ?
+			MSM_DISPLAY_CAP_CMD_MODE :
+			MSM_DISPLAY_CAP_VID_MODE;
+
+		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
+			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+			if (rc) {
+				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
					other, rc);
+				break;
+			}
+
+			info.h_tile_instance[info.num_of_h_tiles++] = other;
+		}
+
+		rc = dpu_encoder_setup(dev, encoder, &info);
+		if (rc)
+			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+				  encoder->base.id, rc);
 	}
 
 	return rc;
@@ -505,6 +543,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					    struct dpu_kms *dpu_kms)
 {
 	struct drm_encoder *encoder = NULL;
+	struct msm_display_info info;
 	int rc = 0;
 
 	if (!priv->dp)
@@ -516,6 +555,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
		return PTR_ERR(encoder);
 	}
 
+	memset(&info, 0, sizeof(info));
 	rc = msm_dp_modeset_init(priv->dp, dev, encoder);
 	if (rc) {
 		DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
@@ -524,6 +564,14 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
 	}
 
 	priv->encoders[priv->num_encoders++] = encoder;
+
+	info.num_of_h_tiles = 1;
+	info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
+	info.intf_type = encoder->encoder_type;
+	rc = dpu_encoder_setup(dev, encoder, &info);
+	if (rc)
+		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+			  encoder->base.id, rc);
 
 	return rc;
 }
@@ -726,41 +774,6 @@ static void dpu_kms_destroy(struct msm_kms *kms)
 	msm_kms_destroy(&dpu_kms->base);
 }
 
-static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
-				      struct drm_encoder *encoder,
-				      bool cmd_mode)
-{
-	struct msm_display_info info;
-	struct msm_drm_private *priv = encoder->dev->dev_private;
-	int i, rc = 0;
-
-	memset(&info, 0, sizeof(info));
-
-	info.intf_type = encoder->encoder_type;
-	info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
-			MSM_DISPLAY_CAP_VID_MODE;
-
-	switch (info.intf_type) {
-	case DRM_MODE_ENCODER_DSI:
-		/* TODO: No support for DSI swap */
-		for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
-			if (priv->dsi[i]) {
-				info.h_tile_instance[info.num_of_h_tiles] = i;
-				info.num_of_h_tiles++;
-			}
-		}
-		break;
-	case DRM_MODE_ENCODER_TMDS:
-		info.num_of_h_tiles = 1;
-		break;
-	}
-
-	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
-	if (rc)
-		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
-			encoder->base.id, rc);
-}
-
 static irqreturn_t dpu_irq(struct msm_kms *kms)
 {
 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -863,7 +876,6 @@ static const struct msm_kms_funcs kms_funcs = {
 	.get_format      = dpu_get_msm_format,
 	.round_pixclk    = dpu_kms_round_pixclk,
 	.destroy         = dpu_kms_destroy,
-	.set_encoder_mode = _dpu_kms_set_encoder_mode,
 	.snapshot        = dpu_kms_mdp_snapshot,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init    = dpu_kms_debugfs_init,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index ec4a6f04394a..c989621209aa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1339,9 +1339,7 @@ static void dpu_plane_reset(struct drm_plane *plane)
		return;
 	}
 
-	pstate->base.plane = plane;
-
-	plane->state = &pstate->base;
+	__drm_atomic_helper_plane_reset(plane, &pstate->base);
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -1647,6 +1645,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
 	if (ret)
 		DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
 
+	drm_plane_create_alpha_property(plane);
+	drm_plane_create_blend_mode_property(plane,
+			BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+			BIT(DRM_MODE_BLEND_PREMULTI) |
+			BIT(DRM_MODE_BLEND_COVERAGE));
+
 	drm_plane_create_rotation_property(plane,
			DRM_MODE_ROTATE_0,
			DRM_MODE_ROTATE_0 |
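A standalone model of the bonded-DSI loop added to _dpu_kms_initialize_dsi(): the slave of a bonded pair gets no encoder of its own and is folded into the master's encoder as a second horizontal tile (field names simplified for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	struct dsi_host { bool present, bonded, master; };

	int main(void)
	{
		/* bonded pair: dsi0 master, dsi1 slave */
		struct dsi_host dsi[2] = { {1, 1, 1}, {1, 1, 0} };

		for (int i = 0; i < 2; i++) {
			int other = (i + 1) % 2;

			if (!dsi[i].present)
				continue;
			if (dsi[i].bonded && !dsi[i].master)
				continue;	/* slave is driven via the master */

			int tiles = 1;
			if (dsi[i].bonded && dsi[other].present)
				tiles++;	/* fold the slave in as tile #2 */
			printf("encoder for dsi%d with %d tile(s)\n", i, tiles);
		}
		return 0;
	}

Two independent (non-bonded) hosts simply take the first branch twice and end up with one single-tile encoder each.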
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4a5b518288b0..cdcaf470f148 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -19,30 +19,12 @@ static int mdp4_hw_init(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct drm_device *dev = mdp4_kms->dev;
-	uint32_t version, major, minor, dmap_cfg, vg_cfg;
+	u32 dmap_cfg, vg_cfg;
 	unsigned long clk;
 	int ret = 0;
 
 	pm_runtime_get_sync(dev->dev);
 
-	mdp4_enable(mdp4_kms);
-	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
-	mdp4_disable(mdp4_kms);
-
-	major = FIELD(version, MDP4_VERSION_MAJOR);
-	minor = FIELD(version, MDP4_VERSION_MINOR);
-
-	DBG("found MDP4 version v%d.%d", major, minor);
-
-	if (major != 4) {
-		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
-				major, minor);
-		ret = -ENXIO;
-		goto out;
-	}
-
-	mdp4_kms->rev = minor;
-
 	if (mdp4_kms->rev > 1) {
 		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
 		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
@@ -88,7 +70,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
 	if (mdp4_kms->rev > 1)
 		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
 
-out:
 	pm_runtime_put_sync(dev->dev);
 
 	return ret;
@@ -108,13 +89,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
 
 static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
-	int i;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-
-	/* see 119ecb7fd */
-	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
-		drm_crtc_vblank_get(crtc);
 }
 
 static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
@@ -133,12 +107,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
 
 static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	struct drm_crtc *crtc;
-
-	/* see 119ecb7fd */
-	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
-		drm_crtc_vblank_put(crtc);
 }
 
 static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -411,14 +379,32 @@ fail:
 	return ret;
 }
 
+static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
+				 u32 *major, u32 *minor)
+{
+	struct drm_device *dev = mdp4_kms->dev;
+	u32 version;
+
+	mdp4_enable(mdp4_kms);
+	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+	mdp4_disable(mdp4_kms);
+
+	*major = FIELD(version, MDP4_VERSION_MAJOR);
+	*minor = FIELD(version, MDP4_VERSION_MINOR);
+
+	DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
+}
+
 struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev->dev);
 	struct mdp4_platform_config *config = mdp4_get_config(pdev);
+	struct msm_drm_private *priv = dev->dev_private;
 	struct mdp4_kms *mdp4_kms;
 	struct msm_kms *kms = NULL;
 	struct msm_gem_address_space *aspace;
 	int irq, ret;
+	u32 major, minor;
 
 	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
 	if (!mdp4_kms) {
@@ -433,7 +419,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
		goto fail;
 	}
 
-	kms = &mdp4_kms->base.base;
+	priv->kms = &mdp4_kms->base.base;
+	kms = priv->kms;
 
 	mdp4_kms->dev = dev;
 
@@ -479,15 +466,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	if (IS_ERR(mdp4_kms->pclk))
 		mdp4_kms->pclk = NULL;
 
-	if (mdp4_kms->rev >= 2) {
-		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
-		if (IS_ERR(mdp4_kms->lut_clk)) {
-			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
-			ret = PTR_ERR(mdp4_kms->lut_clk);
-			goto fail;
-		}
-	}
-
 	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
 	if (IS_ERR(mdp4_kms->axi_clk)) {
 		DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
@@ -496,8 +474,27 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	}
 
 	clk_set_rate(mdp4_kms->clk, config->max_clk);
-	if (mdp4_kms->lut_clk)
+
+	read_mdp_hw_revision(mdp4_kms, &major, &minor);
+
+	if (major != 4) {
+		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
+			      major, minor);
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	mdp4_kms->rev = minor;
+
+	if (mdp4_kms->rev >= 2) {
+		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+		if (IS_ERR(mdp4_kms->lut_clk)) {
+			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+			ret = PTR_ERR(mdp4_kms->lut_clk);
+			goto fail;
+		}
 		clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+	}
 
 	pm_runtime_enable(dev->dev);
 	mdp4_kms->rpm_enabled = true;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 81b0c7cf954e..1220f2b20e05 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -737,7 +737,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 	}
 
 	/*
-	 * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI
+	 * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI
 	 * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when
 	 * only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
 	 * Single FLUSH is supported from hw rev v3.0.
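A minimal model of the read_mdp_hw_revision() split above: the version register is decoded into major/minor bitfields and sanity-checked before the revision-dependent clock setup runs. The field layout used here is an assumption for illustration, not the real REG_MDP4_VERSION layout:

	#include <stdint.h>
	#include <stdio.h>

	#define VERSION_MAJOR(v) (((v) >> 4) & 0xf)	/* assumed field layout */
	#define VERSION_MINOR(v) ((v) & 0xf)

	int main(void)
	{
		uint32_t version = 0x42;	/* pretend REG_MDP4_VERSION read */
		uint32_t major = VERSION_MAJOR(version);
		uint32_t minor = VERSION_MINOR(version);

		printf("MDP4 version v%u.%u\n", major, minor);
		if (major != 4)
			printf("unexpected MDP version\n");
		return 0;
	}

The commit moves this read earlier in mdp4_kms_init() precisely so the lut_clk decision (rev >= 2) happens after the revision is actually known.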
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 15aed45022bc..b3b42672b2d4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -209,13 +209,6 @@ static int mdp5_set_split_display(struct msm_kms *kms,
						  slave_encoder);
 }
 
-static void mdp5_set_encoder_mode(struct msm_kms *kms,
-				  struct drm_encoder *encoder,
-				  bool cmd_mode)
-{
-	mdp5_encoder_set_intf_mode(encoder, cmd_mode);
-}
-
 static void mdp5_kms_destroy(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -287,7 +280,6 @@ static const struct mdp_kms_funcs kms_funcs = {
 	.get_format      = mdp_get_format,
 	.round_pixclk    = mdp5_round_pixclk,
 	.set_split_display = mdp5_set_split_display,
-	.set_encoder_mode = mdp5_set_encoder_mode,
 	.destroy         = mdp5_kms_destroy,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init    = mdp5_kms_debugfs_init,
@@ -448,6 +440,9 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
 		}
 
 		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+		if (!ret)
+			mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
+
 		break;
 	}
 	default:
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
index c92a9508c8d3..c22b07f68670 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -16,7 +16,6 @@
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/ktime.h>
-#include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/dma-buf.h>
 #include <linux/slab.h>
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 4a3293b590b0..eb40d8413bca 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -353,6 +353,9 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
			if (!(aux->retry_cnt % MAX_AUX_RETRIES))
				dp_catalog_aux_update_cfg(aux->catalog);
 		}
+		/* reset aux if link is in connected state */
+		if (dp_catalog_link_is_connected(aux->catalog))
+			dp_catalog_aux_reset(aux->catalog);
 	} else {
 		aux->retry_cnt = 0;
 		switch (aux->aux_error_num) {
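A sketch of the native-AUX failure path above: the AUX configuration is rotated every MAX_AUX_RETRIES failures, and, new in this change, the AUX block is also reset whenever the link is still connected. The catalog calls are stubbed out for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_AUX_RETRIES 5

	static bool link_connected(void) { return true; }	/* stub */
	static void update_cfg(void) { printf("rotate aux cfg\n"); }
	static void aux_reset(void) { printf("reset aux\n"); }

	int main(void)
	{
		int retry_cnt = 0;

		for (int xfer = 0; xfer < 6; xfer++) {	/* 6 failing transfers */
			retry_cnt++;
			if (!(retry_cnt % MAX_AUX_RETRIES))
				update_cfg();		/* periodic reconfig */
			if (link_connected())
				aux_reset();		/* new: recover the block */
		}
		return 0;
	}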
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index ca96e3514790..21b6ac857710 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -372,6 +372,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
 	struct dp_catalog_private *catalog = container_of(dp_catalog,
				struct dp_catalog_private, dp_catalog);
 
+	DRM_DEBUG_DP("enable=%d\n", enable);
 	if (enable) {
 		/*
 		 * To make sure link reg writes happens before other operation,
@@ -580,6 +581,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
 
 	config = (en ? config | intr_mask : config & ~intr_mask);
 
+	DRM_DEBUG_DP("intr_mask=%#x config=%#x\n", intr_mask, config);
 	dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
				config & DP_DP_HPD_INT_MASK);
 }
@@ -610,6 +612,7 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
 	u32 status;
 
 	status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+	DRM_DEBUG_DP("aux status: %#x\n", status);
 	status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
 	status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
 
@@ -685,6 +688,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
 	/* Make sure to clear the current pattern before starting a new one */
 	dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
 
+	DRM_DEBUG_DP("pattern: %#x\n", pattern);
 	switch (pattern) {
 	case DP_PHY_TEST_PATTERN_D10_2:
 		dp_write_link(catalog, REG_DP_STATE_CTRL,
@@ -745,7 +749,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
				DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
 		break;
 	default:
-		DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern);
+		DRM_DEBUG_DP("No valid test pattern requested: %#x\n", pattern);
 		break;
 	}
 }
@@ -928,7 +932,7 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
 	select = dp_catalog->audio_data;
 	acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
 
-	DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
+	DRM_DEBUG_DP("select: %#x, acr_ctrl: %#x\n", select, acr_ctrl);
 
 	dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ee221d835fa0..62e75dc8afc6 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -81,13 +81,6 @@ struct dp_ctrl_private {
 	struct completion video_comp;
 };
 
-struct dp_cr_status {
-	u8 lane_0_1;
-	u8 lane_2_3;
-};
-
-#define DP_LANE0_1_CR_DONE 0x11
-
 static int dp_aux_link_configure(struct drm_dp_aux *aux,
					struct dp_link_info *link)
 {
@@ -120,7 +113,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
			IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
		pr_warn("PUSH_IDLE pattern timedout\n");
 
-	pr_debug("mainlink off done\n");
+	DRM_DEBUG_DP("mainlink off done\n");
 }
 
 static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
@@ -1011,6 +1004,8 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
 	u32 voltage_swing_level = link->phy_params.v_level;
 	u32 pre_emphasis_level = link->phy_params.p_level;
 
+	DRM_DEBUG_DP("voltage level: %d emphasis level: %d\n", voltage_swing_level,
+			pre_emphasis_level);
 	ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
		voltage_swing_level, pre_emphasis_level);
 
@@ -1078,7 +1073,7 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
 }
 
 static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
-		struct dp_cr_status *cr, int *training_step)
+			int *training_step)
 {
 	int tries, old_v_level, ret = 0;
 	u8 link_status[DP_LINK_STATUS_SIZE];
@@ -1107,9 +1102,6 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
		if (ret)
			return ret;
 
-		cr->lane_0_1 = link_status[0];
-		cr->lane_2_3 = link_status[1];
-
		if (drm_dp_clock_recovery_ok(link_status,
			ctrl->link->link_params.num_lanes)) {
			return 0;
@@ -1186,7 +1178,7 @@ static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
 }
 
 static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
-		struct dp_cr_status *cr, int *training_step)
+			int *training_step)
 {
 	int tries = 0, ret = 0;
 	char pattern;
@@ -1202,10 +1194,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
	else
		pattern = DP_TRAINING_PATTERN_2;
 
-	ret = dp_ctrl_update_vx_px(ctrl);
-	if (ret)
-		return ret;
-
	ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
	if (ret)
		return ret;
@@ -1218,8 +1206,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
		ret = dp_ctrl_read_link_status(ctrl, link_status);
		if (ret)
			return ret;
-		cr->lane_0_1 = link_status[0];
-		cr->lane_2_3 = link_status[1];
 
		if (drm_dp_channel_eq_ok(link_status,
			ctrl->link->link_params.num_lanes)) {
@@ -1239,7 +1225,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
 static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
 
 static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
-		struct dp_cr_status *cr, int *training_step)
+			int *training_step)
 {
 	int ret = 0;
 	u8 encoding = DP_SET_ANSI_8B10B;
@@ -1255,7 +1241,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
	drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
				&encoding, 1);
 
-	ret = dp_ctrl_link_train_1(ctrl, cr, training_step);
+	ret = dp_ctrl_link_train_1(ctrl, training_step);
	if (ret) {
		DRM_ERROR("link training #1 failed. ret=%d\n", ret);
		goto end;
@@ -1264,7 +1250,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
	/* print success info as this is a result of user initiated action */
	DRM_DEBUG_DP("link training #1 successful\n");
 
-	ret = dp_ctrl_link_train_2(ctrl, cr, training_step);
+	ret = dp_ctrl_link_train_2(ctrl, training_step);
	if (ret) {
		DRM_ERROR("link training #2 failed. ret=%d\n", ret);
		goto end;
@@ -1280,7 +1266,7 @@ end:
 }
 
 static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
-		struct dp_cr_status *cr, int *training_step)
+			int *training_step)
 {
 	int ret = 0;
 
@@ -1295,7 +1281,7 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
	 * a link training pattern, we have to first do soft reset.
	 */
 
-	ret = dp_ctrl_link_train(ctrl, cr, training_step);
+	ret = dp_ctrl_link_train(ctrl, training_step);
 
	return ret;
 }
@@ -1382,6 +1368,7 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
	if (reset)
		dp_catalog_ctrl_reset(ctrl->catalog);
 
+	DRM_DEBUG_DP("flip=%d\n", flip);
	dp_catalog_ctrl_phy_reset(ctrl->catalog);
	phy_init(phy);
	dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
@@ -1492,14 +1479,16 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
 static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
 {
 	int ret = 0;
-	struct dp_cr_status cr;
 	int training_step = DP_TRAINING_NONE;
 
 	dp_ctrl_push_idle(&ctrl->dp_ctrl);
 
+	ctrl->link->phy_params.p_level = 0;
+	ctrl->link->phy_params.v_level = 0;
+
 	ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
 
-	ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+	ret = dp_ctrl_setup_main_link(ctrl, &training_step);
	if (ret)
		goto end;
 
@@ -1526,7 +1515,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
	 * running. Add the global reset just before disabling the
	 * link clocks and core clocks.
	 */
-	ret = dp_ctrl_off(&ctrl->dp_ctrl);
+	ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl);
	if (ret) {
		DRM_ERROR("failed to disable DP controller\n");
		return ret;
@@ -1630,6 +1619,35 @@ void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
	}
 }
 
+static bool dp_ctrl_clock_recovery_any_ok(
+			const u8 link_status[DP_LINK_STATUS_SIZE],
+			int lane_count)
+{
+	int reduced_cnt;
+
+	if (lane_count <= 1)
+		return false;
+
+	/*
+	 * only interested in the lane number after reduced
+	 * lane_count = 4, then only interested in 2 lanes
+	 * lane_count = 2, then only interested in 1 lane
+	 */
+	reduced_cnt = lane_count >> 1;
+
+	return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
+}
+
+static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
+{
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	int num_lanes = ctrl->link->link_params.num_lanes;
+
+	dp_ctrl_read_link_status(ctrl, link_status);
+
+	return drm_dp_channel_eq_ok(link_status, num_lanes);
+}
+
 int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
 {
 	int rc = 0;
@@ -1637,7 +1655,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
 	u32 rate = 0;
 	int link_train_max_retries = 5;
 	u32 const phy_cts_pixel_clk_khz = 148500;
-	struct dp_cr_status cr;
+	u8 link_status[DP_LINK_STATUS_SIZE];
 	unsigned int training_step;
 
 	if (!dp_ctrl)
@@ -1664,6 +1682,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
		ctrl->link->link_params.rate,
		ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
 
+	ctrl->link->phy_params.p_level = 0;
+	ctrl->link->phy_params.v_level = 0;
+
	rc = dp_ctrl_enable_mainlink_clocks(ctrl);
	if (rc)
		return rc;
@@ -1677,19 +1698,21 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
	}
 
	training_step = DP_TRAINING_NONE;
-	rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+	rc = dp_ctrl_setup_main_link(ctrl, &training_step);
	if (rc == 0) {
		/* training completed successfully */
		break;
	} else if (training_step == DP_TRAINING_1) {
		/* link train_1 failed */
-		if (!dp_catalog_link_is_connected(ctrl->catalog)) {
+		if (!dp_catalog_link_is_connected(ctrl->catalog))
			break;
-		}
+
+		dp_ctrl_read_link_status(ctrl, link_status);
 
		rc = dp_ctrl_link_rate_down_shift(ctrl);
		if (rc < 0) { /* already in RBR = 1.6G */
-			if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
+			if (dp_ctrl_clock_recovery_any_ok(link_status,
+					ctrl->link->link_params.num_lanes)) {
				/*
				 * some lanes are ready,
				 * reduce lane number
@@ -1705,12 +1728,18 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
			}
		}
	} else if (training_step == DP_TRAINING_2) {
-		/* link train_2 failed, lower lane rate */
-		if (!dp_catalog_link_is_connected(ctrl->catalog)) {
+		/* link train_2 failed */
+		if (!dp_catalog_link_is_connected(ctrl->catalog))
			break;
-		}
 
-		rc = dp_ctrl_link_lane_down_shift(ctrl);
+		dp_ctrl_read_link_status(ctrl, link_status);
+
+		if (!drm_dp_clock_recovery_ok(link_status,
+				ctrl->link->link_params.num_lanes))
+			rc = dp_ctrl_link_rate_down_shift(ctrl);
+		else
+			rc = dp_ctrl_link_lane_down_shift(ctrl);
+
		if (rc < 0) {
			/* end with failure */
			break; /* lane == 1 already */
@@ -1721,17 +1750,19 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
		return rc;
 
-	/* stop txing train pattern */
-	dp_ctrl_clear_training_pattern(ctrl);
+	if (rc == 0) {  /* link train successfully */
+		/*
+		 * do not stop train pattern here
+		 * stop link training at on_stream
+		 * to pass compliance test
+		 */
+	} else  {
+		/*
+		 * link training failed
+		 * end txing train pattern here
+		 */
+		dp_ctrl_clear_training_pattern(ctrl);
 
-	/*
-	 * keep transmitting idle pattern until video ready
-	 * to avoid main link from loss of sync
-	 */
-	if (rc == 0)  /* link train successfully */
-		dp_ctrl_push_idle(dp_ctrl);
-	else  {
-		/* link training failed */
		dp_ctrl_deinitialize_mainlink(ctrl);
		rc = -ECONNRESET;
	}
@@ -1739,9 +1770,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
	return rc;
 }
 
+static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
+{
+	int training_step = DP_TRAINING_NONE;
+
+	return dp_ctrl_setup_main_link(ctrl, &training_step);
+}
+
 int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
 {
-	u32 rate = 0;
 	int ret = 0;
 	bool mainlink_ready = false;
 	struct dp_ctrl_private *ctrl;
@@ -1751,10 +1788,6 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
-	rate = ctrl->panel->link_info.rate;
-
-	ctrl->link->link_params.rate = rate;
-	ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes;
 	ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
 
 	DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
@@ -1769,6 +1802,12 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
		}
	}
 
+	if (!dp_ctrl_channel_eq_ok(ctrl))
+		dp_ctrl_link_retrain(ctrl);
+
+	/* stop txing train pattern to end link training */
+	dp_ctrl_clear_training_pattern(ctrl);
+
	ret = dp_ctrl_enable_stream_clocks(ctrl);
	if (ret) {
		DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
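A userspace model of dp_ctrl_clock_recovery_any_ok(): when the rate can no longer be lowered, training may still continue with half the lanes provided the surviving half passed clock recovery. CR status is simplified to one bool per lane instead of the DPCD bit layout:

	#include <stdbool.h>
	#include <stdio.h>

	static bool clock_recovery_ok(const bool *cr, int lanes)
	{
		for (int i = 0; i < lanes; i++)
			if (!cr[i])
				return false;
		return true;
	}

	static bool clock_recovery_any_ok(const bool *cr, int lane_count)
	{
		if (lane_count <= 1)
			return false;
		/* 4 lanes -> check first 2, 2 lanes -> check first 1 */
		return clock_recovery_ok(cr, lane_count >> 1);
	}

	int main(void)
	{
		bool cr[4] = { true, true, false, false };

		printf("full CR: %d, reduced CR: %d\n",
		       clock_recovery_ok(cr, 4), clock_recovery_any_ok(cr, 4));
		return 0;
	}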
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 051c1be1de7e..419fd4a14cbf 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -55,7 +55,6 @@ enum {
 	EV_HPD_INIT_SETUP,
 	EV_HPD_PLUG_INT,
 	EV_IRQ_HPD_INT,
-	EV_HPD_REPLUG_INT,
 	EV_HPD_UNPLUG_INT,
 	EV_USER_NOTIFICATION,
 	EV_CONNECT_PENDING_TIMEOUT,
@@ -102,8 +101,6 @@ struct dp_display_private {
 	struct dp_display_mode dp_mode;
 	struct msm_dp dp_display;
 
-	bool encoder_mode_set;
-
 	/* wait for audio signaling */
 	struct completion audio_comp;
 
@@ -267,6 +264,8 @@ static bool dp_display_is_ds_bridge(struct dp_panel *panel)
 
 static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
 {
+	DRM_DEBUG_DP("present=%#x sink_count=%d\n", dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
+		dp->link->sink_count);
 	return dp_display_is_ds_bridge(dp->panel) &&
		(dp->link->sink_count == 0);
 }
@@ -283,20 +282,6 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display)
 }
 
-static void dp_display_set_encoder_mode(struct dp_display_private *dp)
-{
-	struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	if (!dp->encoder_mode_set && dp->dp_display.encoder &&
-				kms->funcs->set_encoder_mode) {
-		kms->funcs->set_encoder_mode(kms,
-				dp->dp_display.encoder, false);
-
-		dp->encoder_mode_set = true;
-	}
-}
-
 static int dp_display_send_hpd_notification(struct dp_display_private *dp,
					    bool hpd)
 {
@@ -312,6 +297,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
 
 	dp->dp_display.is_connected = hpd;
 
+	DRM_DEBUG_DP("hpd=%d\n", hpd);
 	dp_display_send_hpd_event(&dp->dp_display);
 
 	return 0;
@@ -361,6 +347,7 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset)
 {
 	bool flip = false;
 
+	DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
 	if (dp->core_initialized) {
 		DRM_DEBUG_DP("DP core already initialized\n");
 		return;
@@ -369,8 +356,6 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset)
 	if (dp->usbpd->orientation == ORIENTATION_CC2)
 		flip = true;
 
-	dp_display_set_encoder_mode(dp);
-
 	dp_power_init(dp->power, flip);
 	dp_ctrl_host_init(dp->ctrl, flip, reset);
 	dp_aux_init(dp->aux);
@@ -465,8 +450,10 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
 {
 	u32 sink_request = dp->link->sink_request;
 
+	DRM_DEBUG_DP("%d\n", sink_request);
 	if (dp->hpd_state == ST_DISCONNECTED) {
 		if (sink_request & DP_LINK_STATUS_UPDATED) {
+			DRM_DEBUG_DP("Disconnected sink_request: %d\n", sink_request);
 			DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
 			return -EINVAL;
 		}
@@ -498,6 +485,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
 	rc = dp_link_process_request(dp->link);
 	if (!rc) {
 		sink_request = dp->link->sink_request;
+		DRM_DEBUG_DP("hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request);
 		if (sink_request & DS_PORT_STATUS_CHANGED)
			rc = dp_display_handle_port_ststus_changed(dp);
 		else
@@ -520,6 +508,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
 	mutex_lock(&dp->event_mutex);
 
 	state = dp->hpd_state;
+	DRM_DEBUG_DP("hpd_state=%d\n", state);
 	if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
 		mutex_unlock(&dp->event_mutex);
 		return 0;
@@ -655,6 +644,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
 	/* start sentinel checking in case of missing uevent */
 	dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
 
+	DRM_DEBUG_DP("hpd_state=%d\n", state);
 	/* signal the disconnect event early to ensure proper teardown */
 	dp_display_handle_plugged_change(g_dp_display, false);
 
@@ -713,6 +703,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
 	if (ret == -ECONNRESET) { /* cable unplugged */
 		dp->core_initialized = false;
 	}
+	DRM_DEBUG_DP("hpd_state=%d\n", state);
 
 	mutex_unlock(&dp->event_mutex);
 
@@ -854,6 +845,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
 
 	dp_display = g_dp_display;
 
+	DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count);
 	if (dp_display->power_on) {
 		DRM_DEBUG_DP("Link already setup, return\n");
 		return 0;
@@ -915,6 +907,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
 
 	dp_display->power_on = false;
 
+	DRM_DEBUG_DP("sink count: %d\n", dp->link->sink_count);
 	return 0;
 }
 
@@ -1014,10 +1007,8 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
 void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
 {
 	struct dp_display_private *dp_display;
-	struct drm_device *drm;
 
 	dp_display = container_of(dp, struct dp_display_private, dp_display);
-	drm = dp->drm_dev;
 
 	/*
	 * if we are reading registers we need the link clocks to be on
@@ -1118,9 +1109,6 @@ static int hpd_event_thread(void *data)
		case EV_IRQ_HPD_INT:
			dp_irq_hpd_handle(dp_priv, todo->data);
			break;
-		case EV_HPD_REPLUG_INT:
-			/* do nothing */
-			break;
		case EV_USER_NOTIFICATION:
			dp_display_send_hpd_notification(dp_priv,
						todo->data);
@@ -1162,12 +1150,11 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
 
 	hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
 
+	DRM_DEBUG_DP("hpd isr status=%#x\n", hpd_isr_status);
 	if (hpd_isr_status & 0x0F) {
 		/* hpd related interrupts */
-		if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
-			hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+		if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
			dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
-		}
 
		if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
			/* stop sentinel connect pending checking */
@@ -1175,8 +1162,10 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
			dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
		}
 
-		if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
-			dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
+		if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+			dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+			dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
+		}
 
		if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
			dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
@@ -1285,12 +1274,15 @@ static int dp_pm_resume(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct msm_dp *dp_display = platform_get_drvdata(pdev);
 	struct dp_display_private *dp;
-	u32 status;
+	int sink_count = 0;
 
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
 	mutex_lock(&dp->event_mutex);
 
+	DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
+			dp->core_initialized, dp_display->power_on);
+
 	/* start from disconnected state */
 	dp->hpd_state = ST_DISCONNECTED;
 
@@ -1299,18 +1291,33 @@ static int dp_pm_resume(struct device *dev)
 
 	dp_catalog_ctrl_hpd_config(dp->catalog);
 
-	status = dp_catalog_link_is_connected(dp->catalog);
+	/*
+	 * set sink to normal operation mode -- D0
+	 * before dpcd read
+	 */
+	dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+
+	if (dp_catalog_link_is_connected(dp->catalog)) {
+		sink_count = drm_dp_read_sink_count(dp->aux);
+		if (sink_count < 0)
+			sink_count = 0;
+	}
+
+	dp->link->sink_count = sink_count;
 	/*
	 * can not declared display is connected unless
	 * HDMI cable is plugged in and sink_count of
	 * dongle become 1
	 */
-	if (status && dp->link->sink_count)
+	if (dp->link->sink_count)
		dp->dp_display.is_connected = true;
	else
		dp->dp_display.is_connected = false;
 
+	DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
+			dp->link->sink_count, dp->dp_display.is_connected,
+			dp->core_initialized, dp_display->power_on);
+
	mutex_unlock(&dp->event_mutex);
 
	return 0;
@@ -1326,6 +1333,9 @@ static int dp_pm_suspend(struct device *dev)
 
	mutex_lock(&dp->event_mutex);
 
+	DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
+			dp->core_initialized, dp_display->power_on);
+
	if (dp->core_initialized == true) {
		/* mainlink enabled */
		if (dp_power_clk_status(dp->power, DP_CTRL_PM))
@@ -1339,6 +1349,9 @@ static int dp_pm_suspend(struct device *dev)
	/* host_init will be called at pm_resume */
	dp->core_initialized = false;
 
+	DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n",
+			dp->core_initialized, dp_display->power_on);
+
	mutex_unlock(&dp->event_mutex);
 
	return 0;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 1195044a7a3b..a5bdfc5029de 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -1027,43 +1027,29 @@ int dp_link_process_request(struct dp_link *dp_link)
 
	if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
		dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
-		return ret;
-	}
-
-	ret = dp_link_process_ds_port_status_change(link);
-	if (!ret) {
+	} else if (!dp_link_process_ds_port_status_change(link)) {
		dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
-		return ret;
-	}
-
-	ret = dp_link_process_link_training_request(link);
-	if (!ret) {
+	} else if (!dp_link_process_link_training_request(link)) {
		dp_link->sink_request |= DP_TEST_LINK_TRAINING;
-		return ret;
-	}
-
-	ret = dp_link_process_phy_test_pattern_request(link);
-	if (!ret) {
+	} else if (!dp_link_process_phy_test_pattern_request(link)) {
		dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
-		return ret;
-	}
-
-	ret = dp_link_process_link_status_update(link);
-	if (!ret) {
-		dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
-		return ret;
-	}
-
-	if (dp_link_is_video_pattern_requested(link)) {
-		ret = 0;
-		dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
-	}
-
-	if (dp_link_is_audio_pattern_requested(link)) {
-		dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
-		return -EINVAL;
+	} else {
+		ret = dp_link_process_link_status_update(link);
+		if (!ret) {
+			dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+		} else {
+			if (dp_link_is_video_pattern_requested(link)) {
+				ret = 0;
+				dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+			}
+
+			if (dp_link_is_audio_pattern_requested(link)) {
+				dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+				ret = -EINVAL;
+			}
+		}
	}
 
+	DRM_DEBUG_DP("sink request=%#x", dp_link->sink_request);
	return ret;
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 440b32753430..2181b60e1d1d 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -271,7 +271,7 @@ static u8 dp_panel_get_edid_checksum(struct edid *edid)
 {
	struct edid *last_block;
	u8 *raw_edid;
-	bool is_edid_corrupt;
+	bool is_edid_corrupt = false;
 
	if (!edid) {
		DRM_ERROR("invalid edid input\n");
@@ -303,7 +303,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
	if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
-		u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+		u8 checksum;
+
+		if (dp_panel->edid)
+			checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+		else
+			checksum = dp_panel->connector->real_edid_checksum;
 
		dp_link_send_edid_checksum(panel->link, checksum);
		dp_link_send_test_response(panel->link);
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index 3961ba4efc3c..b48b45e92bfa 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -208,6 +208,9 @@ static int dp_power_clk_set_rate(struct dp_power_private *power,
 
 int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
 {
+	DRM_DEBUG_DP("core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
+		dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
+
	if (pm_type == DP_CORE_PM)
		return dp_power->core_clks_on;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 75afc12a7b25..614dc7f26f2c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -13,6 +13,13 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
	return msm_dsi->encoder;
 }
 
+bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+	unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
+
+	return !(host_flags & MIPI_DSI_MODE_VIDEO);
+}
+
 static int dsi_get_phy(struct msm_dsi *msm_dsi)
 {
	struct platform_device *pdev = msm_dsi->pdev;
@@ -26,8 +33,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
	}
 
	phy_pdev = of_find_device_by_node(phy_node);
-	if (phy_pdev)
+	if (phy_pdev) {
		msm_dsi->phy = platform_get_drvdata(phy_pdev);
+		msm_dsi->phy_dev = &phy_pdev->dev;
+	}
 
	of_node_put(phy_node);
 
@@ -36,8 +45,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
		return -EPROBE_DEFER;
	}
 
-	msm_dsi->phy_dev = get_device(&phy_pdev->dev);
-
	return 0;
 }
 
@@ -244,8 +251,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
		goto fail;
	}
 
-	msm_dsi_manager_setup_encoder(msm_dsi->id);
-
	priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
	priv->connectors[priv->num_connectors++] = msm_dsi->connector;
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 9b8e9b07eced..b50db91cb8a7 100644
---
a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -80,10 +80,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id); struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); -void msm_dsi_manager_setup_encoder(int id); int msm_dsi_manager_register(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); bool msm_dsi_manager_validate_current_config(u8 id); +void msm_dsi_manager_tpg_enable(void); /* msm dsi */ static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi) @@ -109,7 +109,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host); int msm_dsi_host_disable(struct mipi_dsi_host *host); int msm_dsi_host_power_on(struct mipi_dsi_host *host, struct msm_dsi_phy_shared_timings *phy_shared_timings, - bool is_dual_dsi); + bool is_bonded_dsi, struct msm_dsi_phy *phy); int msm_dsi_host_power_off(struct mipi_dsi_host *host); int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, const struct drm_display_mode *mode); @@ -123,7 +123,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, struct msm_dsi_phy_clk_request *clk_req, - bool is_dual_dsi); + bool is_bonded_dsi); void msm_dsi_host_destroy(struct mipi_dsi_host *host); int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, struct drm_device *dev); @@ -145,9 +145,11 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova); int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova); int dsi_clk_init_v2(struct msm_dsi_host *msm_host); int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host); -int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi); -int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi); +int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi); +int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi); void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host); +void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host); + /* dsi phy */ struct msm_dsi_phy; struct msm_dsi_phy_shared_timings { @@ -164,10 +166,9 @@ struct msm_dsi_phy_clk_request { void msm_dsi_phy_driver_register(void); void msm_dsi_phy_driver_unregister(void); int msm_dsi_phy_enable(struct msm_dsi_phy *phy, - struct msm_dsi_phy_clk_request *clk_req); + struct msm_dsi_phy_clk_request *clk_req, + struct msm_dsi_phy_shared_timings *shared_timings); void msm_dsi_phy_disable(struct msm_dsi_phy *phy); -void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, - struct msm_dsi_phy_shared_timings *shared_timing); void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, enum msm_dsi_phy_usecase uc); int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, @@ -175,6 +176,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy); int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy); void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy); +bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable); #endif /* __DSI_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index eadbcc78fd72..49b551ad1bff 100644 --- 
a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -105,6 +105,32 @@ enum dsi_lane_swap { LANE_SWAP_3210 = 7, }; +enum video_config_bpp { + VIDEO_CONFIG_18BPP = 0, + VIDEO_CONFIG_24BPP = 1, +}; + +enum video_pattern_sel { + VID_PRBS = 0, + VID_INCREMENTAL = 1, + VID_FIXED = 2, + VID_MDSS_GENERAL_PATTERN = 3, +}; + +enum cmd_mdp_stream0_pattern_sel { + CMD_MDP_PRBS = 0, + CMD_MDP_INCREMENTAL = 1, + CMD_MDP_FIXED = 2, + CMD_MDP_MDSS_GENERAL_PATTERN = 3, +}; + +enum cmd_dma_pattern_sel { + CMD_DMA_PRBS = 0, + CMD_DMA_INCREMENTAL = 1, + CMD_DMA_FIXED = 2, + CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3, +}; + #define DSI_IRQ_CMD_DMA_DONE 0x00000001 #define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002 #define DSI_IRQ_CMD_MDP_DONE 0x00000100 @@ -518,6 +544,7 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val) #define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000 #define REG_DSI_LANE_CTRL 0x000000a8 +#define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000 #define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000 #define REG_DSI_LANE_SWAP_CTRL 0x000000ac @@ -564,6 +591,53 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val) #define REG_DSI_PHY_RESET 0x00000128 #define DSI_PHY_RESET_RESET 0x00000001 +#define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160 + +#define REG_DSI_TPG_MAIN_CONTROL 0x00000198 +#define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100 + +#define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0 +#define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003 +#define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0 +static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum video_config_bpp val) +{ + return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK; +} +#define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004 + +#define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158 +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000 +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 16 +static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val) +{ + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK; +} +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300 +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8 +static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val) +{ + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK; +} +#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030 +#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4 +static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val) +{ + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK; +} +#define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004 +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002 +#define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001 + +#define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168 + +#define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180 +#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001 + +#define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c +#define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080 +#define 
DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000 +#define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000 + #define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c #define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001 diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index f3f1c03c7db9..96bbc8b6d009 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -32,9 +32,8 @@ static const char * const dsi_6g_bus_clk_names[] = { static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { - .num = 4, + .num = 3, .regs = { - {"gdsc", -1, -1}, {"vdd", 150000, 100}, /* 3.0 V */ {"vdda", 100000, 100}, /* 1.2 V */ {"vddio", 100000, 100}, /* 1.8 V */ @@ -53,9 +52,8 @@ static const char * const dsi_8916_bus_clk_names[] = { static const struct msm_dsi_config msm8916_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { - .num = 3, + .num = 2, .regs = { - {"gdsc", -1, -1}, {"vdda", 100000, 100}, /* 1.2 V */ {"vddio", 100000, 100}, /* 1.8 V */ }, @@ -73,9 +71,8 @@ static const char * const dsi_8976_bus_clk_names[] = { static const struct msm_dsi_config msm8976_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { - .num = 3, + .num = 2, .regs = { - {"gdsc", -1, -1}, {"vdda", 100000, 100}, /* 1.2 V */ {"vddio", 100000, 100}, /* 1.8 V */ }, @@ -89,9 +86,8 @@ static const struct msm_dsi_config msm8976_dsi_cfg = { static const struct msm_dsi_config msm8994_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { - .num = 7, + .num = 6, .regs = { - {"gdsc", -1, -1}, {"vdda", 100000, 100}, /* 1.25 V */ {"vddio", 100000, 100}, /* 1.8 V */ {"vcca", 10000, 100}, /* 1.0 V */ @@ -154,7 +150,6 @@ static const struct msm_dsi_config sdm660_dsi_cfg = { .reg_cfg = { .num = 2, .regs = { - {"vdd", 73400, 32 }, /* 0.9 V */ {"vdda", 12560, 4 }, /* 1.2 V */ }, }, @@ -200,6 +195,24 @@ static const struct msm_dsi_config sc7180_dsi_cfg = { .num_dsi = 1, }; +static const char * const dsi_sc7280_bus_clk_names[] = { + "iface", "bus", +}; + +static const struct msm_dsi_config sc7280_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 1, + .regs = { + {"vdda", 8350, 0 }, /* 1.2 V */ + }, + }, + .bus_clk_names = dsi_sc7280_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names), + .io_start = { 0xae94000 }, + .num_dsi = 1, +}; + static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { .link_clk_set_rate = dsi_link_clk_set_rate_v2, .link_clk_enable = dsi_link_clk_enable_v2, @@ -267,6 +280,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0, + &sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops}, }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index ade9b609c7d9..41e99a9fb5de 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -24,6 +24,7 @@ #define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000 #define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000 #define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 +#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000 #define MSM_DSI_V2_VER_MINOR_8064 0x0 @@ -47,7 +48,7 @@ struct msm_dsi_host_cfg_ops { void* (*tx_buf_get)(struct msm_dsi_host *msm_host); void (*tx_buf_put)(struct msm_dsi_host *msm_host); int 
(*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova); - int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi); + int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_bonded_dsi); }; struct msm_dsi_cfg_handler { diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index b466a4af7c3e..e269df285136 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -27,6 +27,7 @@ #include "dsi_cfg.h" #include "msm_kms.h" #include "msm_gem.h" +#include "phy/dsi_phy.h" #define DSI_RESET_TOGGLE_DELAY_MS 20 @@ -167,6 +168,9 @@ struct msm_dsi_host { int dlane_swap; int num_data_lanes; + /* from phy DT */ + bool cphy_mode; + u32 dma_cmd_ctrl_restore; bool registered; @@ -203,35 +207,22 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( { const struct msm_dsi_cfg_handler *cfg_hnd = NULL; struct device *dev = &msm_host->pdev->dev; - struct regulator *gdsc_reg; struct clk *ahb_clk; int ret; u32 major = 0, minor = 0; - gdsc_reg = regulator_get(dev, "gdsc"); - if (IS_ERR(gdsc_reg)) { - pr_err("%s: cannot get gdsc\n", __func__); - goto exit; - } - ahb_clk = msm_clk_get(msm_host->pdev, "iface"); if (IS_ERR(ahb_clk)) { pr_err("%s: cannot get interface clock\n", __func__); - goto put_gdsc; + goto exit; } pm_runtime_get_sync(dev); - ret = regulator_enable(gdsc_reg); - if (ret) { - pr_err("%s: unable to enable gdsc\n", __func__); - goto put_gdsc; - } - ret = clk_prepare_enable(ahb_clk); if (ret) { pr_err("%s: unable to enable ahb_clk\n", __func__); - goto disable_gdsc; + goto runtime_put; } ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); @@ -246,11 +237,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( disable_clks: clk_disable_unprepare(ahb_clk); -disable_gdsc: - regulator_disable(gdsc_reg); +runtime_put: pm_runtime_put_sync(dev); -put_gdsc: - regulator_put(gdsc_reg); exit: return cfg_hnd; } @@ -510,6 +498,7 @@ int msm_dsi_runtime_resume(struct device *dev) int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) { + u32 byte_intf_rate; int ret; DBG("Set clk rates: pclk=%d, byteclk=%d", @@ -529,8 +518,13 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) } if (msm_host->byte_intf_clk) { - ret = clk_set_rate(msm_host->byte_intf_clk, - msm_host->byte_clk_rate / 2); + /* For CPHY, byte_intf_clk is same as byte_clk */ + if (msm_host->cphy_mode) + byte_intf_rate = msm_host->byte_clk_rate; + else + byte_intf_rate = msm_host->byte_clk_rate / 2; + + ret = clk_set_rate(msm_host->byte_intf_clk, byte_intf_rate); if (ret) { pr_err("%s: Failed to set rate byte intf clk, %d\n", __func__, ret); @@ -679,7 +673,7 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host) clk_disable_unprepare(msm_host->byte_clk); } -static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi) +static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { struct drm_display_mode *mode = msm_host->mode; u32 pclk_rate; @@ -687,22 +681,22 @@ static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi) pclk_rate = mode->clock * 1000; /* - * For dual DSI mode, the current DRM mode has the complete width of the + * For bonded DSI mode, the current DRM mode has the complete width of the * panel. Since, the complete panel is driven by two DSI controllers, * the clock rates have to be split between the two dsi controllers. * Adjust the byte and pixel clock rates for each dsi host accordingly. 
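* For example, a 300000 kHz DRM mode clock yields a 150 MHz pixel clock on each of the two hosts.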
*/ - if (is_dual_dsi) + if (is_bonded_dsi) pclk_rate /= 2; return pclk_rate; } -static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi) +static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { u8 lanes = msm_host->lanes; u32 bpp = dsi_get_bpp(msm_host->format); - u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi); + u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi); u64 pclk_bpp = (u64)pclk_rate * bpp; if (lanes == 0) { @@ -710,7 +704,11 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi) lanes = 1; } - do_div(pclk_bpp, (8 * lanes)); + /* CPHY "byte_clk" is in units of 16 bits */ + if (msm_host->cphy_mode) + do_div(pclk_bpp, (16 * lanes)); + else + do_div(pclk_bpp, (8 * lanes)); msm_host->pixel_clk_rate = pclk_rate; msm_host->byte_clk_rate = pclk_bpp; @@ -720,28 +718,28 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi) } -int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi) +int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { if (!msm_host->mode) { pr_err("%s: mode not set\n", __func__); return -EINVAL; } - dsi_calc_pclk(msm_host, is_dual_dsi); + dsi_calc_pclk(msm_host, is_bonded_dsi); msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk); return 0; } -int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi) +int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { u32 bpp = dsi_get_bpp(msm_host->format); u64 pclk_bpp; unsigned int esc_mhz, esc_div; unsigned long byte_mhz; - dsi_calc_pclk(msm_host, is_dual_dsi); + dsi_calc_pclk(msm_host, is_bonded_dsi); - pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp; + pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_bonded_dsi) * bpp; do_div(pclk_bpp, 8); msm_host->src_clk_rate = pclk_bpp; @@ -834,7 +832,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( } static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, - struct msm_dsi_phy_shared_timings *phy_shared_timings) + struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy) { u32 flags = msm_host->mode_flags; enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; @@ -929,6 +927,10 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) { lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL); + + if (msm_dsi_phy_set_continuous_clock(phy, enable)) + lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY; + dsi_write(msm_host, REG_DSI_LANE_CTRL, lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); } @@ -936,9 +938,12 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, data |= DSI_CTRL_ENABLE; dsi_write(msm_host, REG_DSI_CTRL, data); + + if (msm_host->cphy_mode) + dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0)); } -static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi) +static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { struct drm_display_mode *mode = msm_host->mode; u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */ @@ -956,13 +961,13 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi) DBG(""); /* - * For dual DSI mode, the current DRM mode has + * For bonded DSI mode, the current DRM mode has * the complete width of the panel. Since, the complete * panel is driven by two DSI controllers, the horizontal * timings have to be split between the two dsi controllers. 
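* For example, a 2160-pixel-wide bonded panel programs each host with h_total, sync and active-start values halved, so each controller drives its own 1080-pixel half.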
* Adjust the DSI host timing values accordingly. */ - if (is_dual_dsi) { + if (is_bonded_dsi) { h_total /= 2; hs_end /= 2; ha_start /= 2; @@ -2226,6 +2231,8 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, struct clk *byte_clk_provider, *pixel_clk_provider; int ret; + msm_host->cphy_mode = src_phy->cphy_mode; + ret = msm_dsi_phy_get_clk_provider(src_phy, &byte_clk_provider, &pixel_clk_provider); if (ret) { @@ -2285,19 +2292,26 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host) void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, struct msm_dsi_phy_clk_request *clk_req, - bool is_dual_dsi) + bool is_bonded_dsi) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; int ret; - ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi); + ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi); if (ret) { pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); return; } - clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; + /* CPHY transmits 16 bits over 7 clock cycles * "byte_clk" is in units of 16-bits (see dsi_calc_pclk), * so multiply by 7 to get the "bitclk rate" */ + if (msm_host->cphy_mode) + clk_req->bitclk_rate = msm_host->byte_clk_rate * 7; + else + clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; clk_req->escclk_rate = msm_host->esc_clk_rate; } @@ -2354,7 +2368,7 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable) int msm_dsi_host_power_on(struct mipi_dsi_host *host, struct msm_dsi_phy_shared_timings *phy_shared_timings, - bool is_dual_dsi) + bool is_bonded_dsi, struct msm_dsi_phy *phy) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; @@ -2392,9 +2406,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host, goto fail_disable_clk; } - dsi_timing_setup(msm_host, is_dual_dsi); + dsi_timing_setup(msm_host, is_bonded_dsi); dsi_sw_reset(msm_host); - dsi_ctrl_config(msm_host, true, phy_shared_timings); + dsi_ctrl_config(msm_host, true, phy_shared_timings, phy); if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 1); @@ -2425,7 +2439,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) goto unlock_ret; } - dsi_ctrl_config(msm_host, false, NULL); + dsi_ctrl_config(msm_host, false, NULL, NULL); if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 0); @@ -2495,3 +2509,64 @@ void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_ho pm_runtime_put_sync(&msm_host->pdev->dev); } + +static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host) +{ + u32 reg; + + reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); + + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff); + /* draw checkered rectangle pattern */ + dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL, + DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN); + /* use 24-bit RGB test pattern */ + dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG, + DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) | + DSI_TPG_VIDEO_CONFIG_RGB); + + reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN); + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg); + + DBG("Video test pattern setup done\n"); +} + +static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host) +{ + u32 reg; + + reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); + + /* initial value for test pattern */ + dsi_write(msm_host,
REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff); + + reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN); + + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg); + /* draw checkered rectangle pattern */ + dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2, + DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN); + + DBG("Cmd test pattern setup done\n"); +} + +void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO); + u32 reg; + + if (is_video_mode) + msm_dsi_host_video_test_pattern_setup(msm_host); + else + msm_dsi_host_cmd_test_pattern_setup(msm_host); + + reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); + /* enable the test pattern generator */ + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN)); + + /* for command mode need to trigger one frame from tpg */ + if (!is_video_mode) + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, + DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER); +} diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 4ebfedc4a9ac..c41d39f5b7cf 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -21,14 +21,14 @@ struct msm_dsi_manager { struct msm_dsi *dsi[DSI_MAX]; - bool is_dual_dsi; + bool is_bonded_dsi; bool is_sync_needed; int master_dsi_link_id; }; static struct msm_dsi_manager msm_dsim_glb; -#define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi) +#define IS_BONDED_DSI() (msm_dsim_glb.is_bonded_dsi) #define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed) #define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id) @@ -42,18 +42,17 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id) return msm_dsim_glb.dsi[(id + 1) % DSI_MAX]; } -static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id) +static int dsi_mgr_parse_of(struct device_node *np, int id) { struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; - /* We assume 2 dsi nodes have the same information of dual-dsi and - * sync-mode, and only one node specifies master in case of dual mode. + /* We assume 2 dsi nodes have the same information of bonded dsi and + * sync-mode, and only one node specifies master in case of bonded mode. */ - if (!msm_dsim->is_dual_dsi) - msm_dsim->is_dual_dsi = of_property_read_bool( - np, "qcom,dual-dsi-mode"); + if (!msm_dsim->is_bonded_dsi) + msm_dsim->is_bonded_dsi = of_property_read_bool(np, "qcom,dual-dsi-mode"); - if (msm_dsim->is_dual_dsi) { + if (msm_dsim->is_bonded_dsi) { if (of_property_read_bool(np, "qcom,master-dsi")) msm_dsim->master_dsi_link_id = id; if (!msm_dsim->is_sync_needed) @@ -72,7 +71,7 @@ static int dsi_mgr_setup_components(int id) struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); int ret; - if (!IS_DUAL_DSI()) { + if (!IS_BONDED_DSI()) { ret = msm_dsi_host_register(msm_dsi->host, true); if (ret) return ret; @@ -100,7 +99,7 @@ static int dsi_mgr_setup_components(int id) if (ret) return ret; - /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */ + /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. 
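* The slave PHY is therefore put in the MSM_DSI_PHY_SLAVE usecase below and only the master PLL's byte/pixel clock outputs are consumed.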
*/ msm_dsi_phy_set_usecase(clk_master_dsi->phy, MSM_DSI_PHY_MASTER); msm_dsi_phy_set_usecase(clk_slave_dsi->phy, @@ -119,12 +118,11 @@ static int enable_phy(struct msm_dsi *msm_dsi, { struct msm_dsi_phy_clk_request clk_req; int ret; - bool is_dual_dsi = IS_DUAL_DSI(); + bool is_bonded_dsi = IS_BONDED_DSI(); - msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi); + msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi); - ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req); - msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings); + ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings); return ret; } @@ -138,12 +136,12 @@ dsi_mgr_phy_enable(int id, struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); int ret; - /* In case of dual DSI, some registers in PHY1 have been programmed + /* In case of bonded DSI, some registers in PHY1 have been programmed * during PLL0 clock's set_rate. The PHY1 reset called by host1 here * will silently reset those PHY1 registers. Therefore we need to reset * and enable both PHYs before any PLL clock operation. */ - if (IS_DUAL_DSI() && mdsi && sdsi) { + if (IS_BONDED_DSI() && mdsi && sdsi) { if (!mdsi->phy_enabled && !sdsi->phy_enabled) { msm_dsi_host_reset_phy(mdsi->host); msm_dsi_host_reset_phy(sdsi->host); @@ -178,11 +176,11 @@ static void dsi_mgr_phy_disable(int id) struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); /* disable DSI phy - * In dual-dsi configuration, the phy should be disabled for the + * In bonded dsi configuration, the phy should be disabled for the * first controller only when the second controller is disabled. */ msm_dsi->phy_enabled = false; - if (IS_DUAL_DSI() && mdsi && sdsi) { + if (IS_BONDED_DSI() && mdsi && sdsi) { if (!mdsi->phy_enabled && !sdsi->phy_enabled) { msm_dsi_phy_disable(sdsi->phy); msm_dsi_phy_disable(mdsi->phy); @@ -217,24 +215,6 @@ static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge) return dsi_bridge->id; } -static bool dsi_mgr_is_cmd_mode(struct msm_dsi *msm_dsi) -{ - unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host); - return !(host_flags & MIPI_DSI_MODE_VIDEO); -} - -void msm_dsi_manager_setup_encoder(int id) -{ - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct msm_drm_private *priv = msm_dsi->dev->dev_private; - struct msm_kms *kms = priv->kms; - struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi); - - if (encoder && kms->funcs->set_encoder_mode) - kms->funcs->set_encoder_mode(kms, encoder, - dsi_mgr_is_cmd_mode(msm_dsi)); -} - static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id) { struct msm_drm_private *priv = conn->dev->dev_private; @@ -244,7 +224,7 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id) struct msm_dsi *master_dsi, *slave_dsi; struct drm_panel *panel; - if (IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id)) { + if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) { master_dsi = other_dsi; slave_dsi = msm_dsi; } else { @@ -253,7 +233,7 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id) } /* - * There is only 1 panel in the global panel list for dual DSI mode. + * There is only 1 panel in the global panel list for bonded DSI mode. * Therefore slave dsi should get the drm_panel instance from master * dsi. 
*/ @@ -264,20 +244,20 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id) return PTR_ERR(panel); } - if (!panel || !IS_DUAL_DSI()) + if (!panel || !IS_BONDED_DSI()) goto out; drm_object_attach_property(&conn->base, conn->dev->mode_config.tile_property, 0); /* - * Set split display info to kms once dual DSI panel is connected to + * Set split display info to kms once bonded DSI panel is connected to * both hosts. */ if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) { kms->funcs->set_split_display(kms, master_dsi->encoder, slave_dsi->encoder, - dsi_mgr_is_cmd_mode(msm_dsi)); + msm_dsi_is_cmd_mode(msm_dsi)); } out: @@ -317,7 +297,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) return 0; /* - * In dual DSI mode, we have one connector that can be + * In bonded DSI mode, we have one connector that can be * attached to the drm_panel. */ num = drm_panel_get_modes(panel, connector); @@ -366,30 +346,30 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX]; - bool is_dual_dsi = IS_DUAL_DSI(); + bool is_bonded_dsi = IS_BONDED_DSI(); int ret; DBG("id=%d", id); if (!msm_dsi_device_connected(msm_dsi)) return; - /* Do nothing with the host if it is slave-DSI in case of dual DSI */ - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) + /* Do nothing with the host if it is slave-DSI in case of bonded DSI */ + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) return; ret = dsi_mgr_phy_enable(id, phy_shared_timings); if (ret) goto phy_en_fail; - ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi); + ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_bonded_dsi, msm_dsi->phy); if (ret) { pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); goto host_on_fail; } - if (is_dual_dsi && msm_dsi1) { + if (is_bonded_dsi && msm_dsi1) { ret = msm_dsi_host_power_on(msm_dsi1->host, - &phy_shared_timings[DSI_1], is_dual_dsi); + &phy_shared_timings[DSI_1], is_bonded_dsi, msm_dsi1->phy); if (ret) { pr_err("%s: power on host1 failed, %d\n", __func__, ret); @@ -415,7 +395,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) goto host_en_fail; } - if (is_dual_dsi && msm_dsi1) { + if (is_bonded_dsi && msm_dsi1) { ret = msm_dsi_host_enable(msm_dsi1->host); if (ret) { pr_err("%s: enable host1 failed, %d\n", __func__, ret); @@ -431,7 +411,7 @@ host_en_fail: if (panel) drm_panel_unprepare(panel); panel_prep_fail: - if (is_dual_dsi && msm_dsi1) + if (is_bonded_dsi && msm_dsi1) msm_dsi_host_power_off(msm_dsi1->host); host1_on_fail: msm_dsi_host_power_off(host); @@ -441,20 +421,33 @@ phy_en_fail: return; } +void msm_dsi_manager_tpg_enable(void) +{ + struct msm_dsi *m_dsi = dsi_mgr_get_dsi(DSI_0); + struct msm_dsi *s_dsi = dsi_mgr_get_dsi(DSI_1); + + /* if dual dsi, trigger tpg on master first then slave */ + if (m_dsi) { + msm_dsi_host_test_pattern_en(m_dsi->host); + if (IS_BONDED_DSI() && s_dsi) + msm_dsi_host_test_pattern_en(s_dsi->host); + } +} + static void dsi_mgr_bridge_enable(struct drm_bridge *bridge) { int id = dsi_mgr_bridge_get_id(bridge); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct drm_panel *panel = msm_dsi->panel; - bool is_dual_dsi = IS_DUAL_DSI(); + bool is_bonded_dsi = IS_BONDED_DSI(); int ret; DBG("id=%d", id); if (!msm_dsi_device_connected(msm_dsi)) return; - /* Do nothing with the host if it is slave-DSI in case of dual DSI */ - 
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) + /* Do nothing with the host if it is slave-DSI in case of bonded DSI */ + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) return; if (panel) { @@ -471,15 +464,15 @@ static void dsi_mgr_bridge_disable(struct drm_bridge *bridge) int id = dsi_mgr_bridge_get_id(bridge); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct drm_panel *panel = msm_dsi->panel; - bool is_dual_dsi = IS_DUAL_DSI(); + bool is_bonded_dsi = IS_BONDED_DSI(); int ret; DBG("id=%d", id); if (!msm_dsi_device_connected(msm_dsi)) return; - /* Do nothing with the host if it is slave-DSI in case of dual DSI */ - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) + /* Do nothing with the host if it is slave-DSI in case of bonded DSI */ + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) return; if (panel) { @@ -497,7 +490,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; - bool is_dual_dsi = IS_DUAL_DSI(); + bool is_bonded_dsi = IS_BONDED_DSI(); int ret; DBG("id=%d", id); @@ -506,18 +499,18 @@ return; /* - * Do nothing with the host if it is slave-DSI in case of dual DSI. + * Do nothing with the host if it is slave-DSI in case of bonded DSI. * It is safe to call dsi_mgr_phy_disable() here because a single PHY * won't be disabled until both PHYs request disable. */ - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) goto disable_phy; ret = msm_dsi_host_disable(host); if (ret) pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); - if (is_dual_dsi && msm_dsi1) { + if (is_bonded_dsi && msm_dsi1) { ret = msm_dsi_host_disable(msm_dsi1->host); if (ret) pr_err("%s: host1 disable failed, %d\n", __func__, ret); @@ -537,7 +530,7 @@ if (ret) pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); - if (is_dual_dsi && msm_dsi1) { + if (is_bonded_dsi && msm_dsi1) { ret = msm_dsi_host_power_off(msm_dsi1->host); if (ret) pr_err("%s: host1 power off failed, %d\n", @@ -556,15 +549,15 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); struct mipi_dsi_host *host = msm_dsi->host; - bool is_dual_dsi = IS_DUAL_DSI(); + bool is_bonded_dsi = IS_BONDED_DSI(); DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) return; msm_dsi_host_set_display_mode(host, adjusted_mode); - if (is_dual_dsi && other_dsi) + if (is_bonded_dsi && other_dsi) msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); } @@ -640,15 +633,15 @@ fail: bool msm_dsi_manager_validate_current_config(u8 id) { - bool is_dual_dsi = IS_DUAL_DSI(); + bool is_bonded_dsi = IS_BONDED_DSI(); /* - * For dual DSI, we only have one drm panel. For this + * For bonded DSI, we only have one drm panel. For this * use case, we register only one bridge/connector. * Skip bridge/connector initialisation if it is - * slave-DSI for dual DSI configuration. + * slave-DSI for bonded DSI configuration.
*/ - if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) { + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) { DBG("Skip bridge registration for slave DSI->id: %d\n", id); return false; } @@ -740,7 +733,7 @@ int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg) if (!msg->tx_buf || !msg->tx_len) return 0; - /* In dual master case, panel requires the same commands sent to + /* In bonded master case, panel requires the same commands sent to * both DSI links. Host issues the command trigger to both links * when DSI_1 calls the cmd transfer function, no matter it happens * before or after DSI_0 cmd transfer. @@ -809,9 +802,9 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) msm_dsim->dsi[id] = msm_dsi; - ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id); + ret = dsi_mgr_parse_of(msm_dsi->pdev->dev.of_node, id); if (ret) { - pr_err("%s: failed to parse dual DSI info\n", __func__); + pr_err("%s: failed to parse OF DSI info\n", __func__); goto fail; } @@ -840,3 +833,12 @@ void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi) msm_dsim->dsi[msm_dsi->id] = NULL; } +bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi) +{ + return IS_BONDED_DSI(); +} + +bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi) +{ + return IS_MASTER_DSI_LINK(msm_dsi->id); +} diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 6ca6bfd4809b..8c65ef6968ca 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -5,6 +5,7 @@ #include <linux/clk-provider.h> #include <linux/platform_device.h> +#include <dt-bindings/phy/phy.h> #include "dsi_phy.h" @@ -461,6 +462,51 @@ int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, return 0; } +int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x7; + s32 tmax, tmin; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x7 = ui * 7; + + temp = S_DIV_ROUND_UP(38 * coeff, ui_x7); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff) / ui_x7; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false); + + tmin = DIV_ROUND_UP(50 * coeff, ui_x7); + tmax = 255; + timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false); + + tmin = 1; + tmax = 32; + timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false); + + tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1); + tmax = 64; + timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false); + + DBG("%d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->clk_prepare, timing->hs_exit, timing->hs_rqst); + + return 0; +} + static int dsi_phy_regulator_init(struct msm_dsi_phy *phy) { struct regulator_bulk_data *s = phy->supplies; @@ -593,6 +639,8 @@ static const struct of_device_id dsi_phy_dt_match[] = { .data = &dsi_phy_7nm_cfgs }, { .compatible = "qcom,dsi-phy-7nm-8150", .data = &dsi_phy_7nm_8150_cfgs }, + { .compatible = "qcom,sc7280-dsi-phy-7nm", + .data = &dsi_phy_7nm_7280_cfgs }, #endif {} }; @@ -625,17 +673,13 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) { struct 
msm_dsi_phy *phy; struct device *dev = &pdev->dev; - const struct of_device_id *match; + u32 phy_type; int ret; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; - match = of_match_node(dsi_phy_dt_match, dev->of_node); - if (!match) - return -ENODEV; - phy->provided_clocks = devm_kzalloc(dev, struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS), GFP_KERNEL); @@ -644,7 +688,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) phy->provided_clocks->num = NUM_PROVIDED_CLKS; - phy->cfg = match->data; + phy->cfg = of_device_get_match_data(&pdev->dev); + if (!phy->cfg) + return -ENODEV; + phy->pdev = pdev; phy->id = dsi_phy_get_id(phy); @@ -657,6 +704,8 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) phy->regulator_ldo_mode = of_property_read_bool(dev->of_node, "qcom,dsi-phy-regulator-ldo-mode"); + if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type)) + phy->cphy_mode = (phy_type == PHY_TYPE_CPHY); phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size); if (IS_ERR(phy->base)) { @@ -754,7 +803,8 @@ void __exit msm_dsi_phy_driver_unregister(void) } int msm_dsi_phy_enable(struct msm_dsi_phy *phy, - struct msm_dsi_phy_clk_request *clk_req) + struct msm_dsi_phy_clk_request *clk_req, + struct msm_dsi_phy_shared_timings *shared_timings) { struct device *dev = &phy->pdev->dev; int ret; @@ -782,6 +832,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, goto phy_en_fail; } + memcpy(shared_timings, &phy->timing.shared_timings, + sizeof(*shared_timings)); + /* * Resetting DSI PHY silently changes its PLL registers to reset status, * which will confuse clock driver and result in wrong output rate of @@ -821,13 +874,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) dsi_phy_disable_resource(phy); } -void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, - struct msm_dsi_phy_shared_timings *shared_timings) -{ - memcpy(shared_timings, &phy->timing.shared_timings, - sizeof(*shared_timings)); -} - void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, enum msm_dsi_phy_usecase uc) { @@ -835,6 +881,15 @@ void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, phy->usecase = uc; } +/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */ +bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable) +{ + if (!phy || !phy->cfg->ops.set_continuous_clock) + return false; + + return phy->cfg->ops.set_continuous_clock(phy, enable); +} + int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, struct clk **byte_clk_provider, struct clk **pixel_clk_provider) { diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index 5b0feef87127..b91303ada74f 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -24,6 +24,7 @@ struct msm_dsi_phy_ops { void (*disable)(struct msm_dsi_phy *phy); void (*save_pll_state)(struct msm_dsi_phy *phy); int (*restore_pll_state)(struct msm_dsi_phy *phy); + bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable); }; struct msm_dsi_phy_cfg { @@ -51,6 +52,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs; struct msm_dsi_dphy_timing { u32 clk_zero; @@ -99,6 +101,7 @@ struct msm_dsi_phy { enum msm_dsi_phy_usecase usecase; bool regulator_ldo_mode; + bool 
cphy_mode; struct clk_hw *vco_hw; bool pll_on; @@ -119,5 +122,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); #endif /* __DSI_PHY_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index e46b10fc793a..d8128f50b0dd 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -84,7 +84,7 @@ struct dsi_pll_10nm { #define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw) /* - * Global list of private DSI PLL struct pointers. We need this for Dual DSI + * Global list of private DSI PLL struct pointers. We need this for bonded DSI * mode, where the master PLL's clk_ops needs access the slave's private data */ static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX]; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c index a34cf151c517..d13552b2213b 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -86,7 +86,7 @@ struct dsi_pll_14nm { /* * Private struct for N1/N2 post-divider clocks. These clocks are similar to * the generic clk_divider class of clocks. The only difference is that it - * also sets the slave DSI PLL's post-dividers if in Dual DSI mode + * also sets the slave DSI PLL's post-dividers if in bonded DSI mode */ struct dsi_pll_14nm_postdiv { struct clk_hw hw; @@ -102,7 +102,7 @@ struct dsi_pll_14nm_postdiv { #define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw) /* - * Global list of private DSI PLL struct pointers. We need this for Dual DSI + * Global list of private DSI PLL struct pointers. We need this for bonded DSI * mode, where the master PLL's clk_ops needs access the slave's private data */ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; @@ -658,7 +658,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, val |= value << shift; dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); - /* If we're master in dual DSI mode, then the slave PLL's post-dividers + /* If we're master in bonded DSI mode, then the slave PLL's post-dividers * follow the master's post dividers */ if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) { @@ -1050,7 +1050,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = { .reg_cfg = { .num = 1, .regs = { - {"vcca", 17000, 32}, + {"vcca", 73400, 32}, }, }, .ops = { diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 7c23d4c47338..cb297b08458e 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -83,7 +83,7 @@ struct dsi_pll_7nm { #define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, clk_hw) /* - * Global list of private DSI PLL struct pointers. We need this for Dual DSI + * Global list of private DSI PLL struct pointers. 
We need this for bonded DSI * mode, where the master PLL's clk_ops needs access the slave's private data */ static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX]; @@ -256,7 +256,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi (config->frac_div_start & 0x30000) >> 16); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); - dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */ + dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, pll->phy->cphy_mode ? 0x00 : 0x10); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters); } @@ -642,7 +642,8 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, - CLK_SET_RATE_PARENT, 1, 8); + CLK_SET_RATE_PARENT, 1, + pll_7nm->phy->cphy_mode ? 7 : 8); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; @@ -663,32 +664,47 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id); - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, - 0, 1, 4); + if (pll_7nm->phy->cphy_mode) + hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7); + else + hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; } - snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id); - snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id); - snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id); - snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); - - hw = devm_clk_hw_register_mux(dev, clk_name, - ((const char *[]){ - parent, parent2, parent3, parent4 - }), 4, 0, pll_7nm->phy->base + - REG_DSI_7nm_PHY_CMN_CLK_CFG1, - 0, 2, 0, NULL); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); - goto fail; + /* in CPHY mode, pclk_mux will always have post_out_div as parent + * don't register a pclk_mux clock and just use post_out_div instead + */ + if (pll_7nm->phy->cphy_mode) { + u32 data; + + data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3); + + snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); + } else { + snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id); + snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id); + snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id); + snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); + + hw = devm_clk_hw_register_mux(dev, clk_name, + ((const char *[]){ + parent, parent2, parent3, parent4 + }), 4, 0, pll_7nm->phy->base + + REG_DSI_7nm_PHY_CMN_CLK_CFG1, + 0, 2, 0, NULL); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail; + } + + snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); } snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id); - snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); /* PIX CLK DIV : DIV_CTRL_7_4*/ hw = devm_clk_hw_register_divider(dev, clk_name, parent, @@ -813,15 +829,21 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, 
struct msm_dsi_dphy_timing *timing = &phy->timing; void __iomem *base = phy->base; bool less_than_1500_mhz; - u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; + u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0; + u32 glbl_pemph_ctrl_0; + u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl; u32 data; DBG(""); - if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) { + if (phy->cphy_mode) + ret = msm_dsi_cphy_timing_calc_v4(timing, clk_req); + else + ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req); + if (ret) { DRM_DEV_ERROR(&phy->pdev->dev, - "%s: D-PHY timing calculation failed\n", __func__); + "%s: PHY timing calculation failed\n", __func__); return -EINVAL; } @@ -842,6 +864,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, /* Alter PHY configurations if data rate less than 1.5GHZ*/ less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000); + /* For C-PHY, no low power settings for lower clk rate */ + if (phy->cphy_mode) + less_than_1500_mhz = false; + if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52; glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00; @@ -856,6 +882,17 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, glbl_rescode_bot_ctrl = 0x3c; } + if (phy->cphy_mode) { + vreg_ctrl_0 = 0x51; + vreg_ctrl_1 = 0x55; + glbl_pemph_ctrl_0 = 0x11; + lane_ctrl0 = 0x17; + } else { + vreg_ctrl_1 = 0x5c; + glbl_pemph_ctrl_0 = 0x00; + lane_ctrl0 = 0x1f; + } + /* de-assert digital and pll power down */ data = BIT(6) | BIT(5); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data); @@ -876,15 +913,22 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84); + if (phy->cphy_mode) + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL, BIT(6)); + /* Enable LDO */ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0); - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, vreg_ctrl_1); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL, glbl_str_swi_cal_sel_ctrl); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0, glbl_hstx_str_ctrl_0); - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, + glbl_pemph_ctrl_0); + if (phy->cphy_mode) + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1, 0x01); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL, glbl_rescode_top_ctrl); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL, @@ -894,10 +938,11 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, /* Remove power down from all blocks */ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f); - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, lane_ctrl0); /* Select full-rate mode */ - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40); + if (!phy->cphy_mode) + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40); ret = dsi_7nm_set_usecase(phy); if (ret) { @@ -907,22 +952,36 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, } /* DSI PHY timings */ - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00); - dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero); - dsi_phy_write(base 
@@ -932,6 +991,21 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
 	return 0;
 }
 
+static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->base;
+	u32 data;
+
+	data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
+	if (enable)
+		data |= BIT(5) | BIT(6);
+	else
+		data &= ~(BIT(5) | BIT(6));
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1, data);
+
+	return enable;
+}
+
 static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
 {
 	void __iomem *base = phy->base;
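Note: the new dsi_7nm_set_continuous_clock() keeps the HS clock lane running between transfers (BIT(5) | BIT(6) of CMN_LANE_CTRL1) instead of letting it drop back to LP-11, and it is reached through the .set_continuous_clock member added to the cfg tables below. A minimal sketch of a caller follows; only the ops member comes from this diff, the wrapper itself is an assumption for illustration:

/* Hedged sketch: how the DSI host side might drive the new hook.
 * The wrapper name is hypothetical; phy->cfg->ops matches the cfg
 * tables in this file. */
static bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy,
					     bool enable)
{
	if (!phy || !phy->cfg->ops.set_continuous_clock)
		return false;	/* PHY stays in non-continuous mode */

	return phy->cfg->ops.set_continuous_clock(phy, enable);
}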
@@ -972,6 +1046,7 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
 		.pll_init = dsi_pll_7nm_init,
 		.save_pll_state = dsi_7nm_pll_save_state,
 		.restore_pll_state = dsi_7nm_pll_restore_state,
+		.set_continuous_clock = dsi_7nm_set_continuous_clock,
 	},
 	.min_pll_rate = 600000000UL,
 #ifdef CONFIG_64BIT
@@ -998,9 +1073,36 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
 		.pll_init = dsi_pll_7nm_init,
 		.save_pll_state = dsi_7nm_pll_save_state,
 		.restore_pll_state = dsi_7nm_pll_restore_state,
+		.set_continuous_clock = dsi_7nm_set_continuous_clock,
 	},
 	.min_pll_rate = 1000000000UL,
 	.max_pll_rate = 3500000000UL,
 	.io_start = { 0xae94400, 0xae96400 },
 	.num_dsi_phy = 2,
 };
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
+	.has_phy_lane = true,
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vdds", 37550, 0},
+		},
+	},
+	.ops = {
+		.enable = dsi_7nm_phy_enable,
+		.disable = dsi_7nm_phy_disable,
+		.pll_init = dsi_pll_7nm_init,
+		.save_pll_state = dsi_7nm_pll_save_state,
+		.restore_pll_state = dsi_7nm_pll_restore_state,
+	},
+	.min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+	.max_pll_rate = 5000000000ULL,
+#else
+	.max_pll_rate = ULONG_MAX,
+#endif
+	.io_start = { 0xae94400 },
+	.num_dsi_phy = 1,
+	.quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
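Note: the new dsi_phy_7nm_7280_cfgs table (single PHY at 0xae94400, V4.1 quirk, no continuous-clock hook) only takes effect once it is referenced from the PHY driver's match table in dsi_phy.c, which is outside this diff. A context sketch of the expected wiring; the compatible string is assumed from the sc7280 DT binding in this series, not quoted from this hunk:

/* Context sketch (dsi_phy.c, not part of this diff). */
static const struct of_device_id dsi_phy_dt_match[] = {
	/* ... existing 7nm entries ... */
	{ .compatible = "qcom,sc7280-dsi-phy-7nm",
	  .data = &dsi_phy_7nm_7280_cfgs },
	{}
};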
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a332b09a5a11..2e6fc185e54d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -603,6 +603,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 		if (IS_ERR(priv->event_thread[i].worker)) {
 			ret = PTR_ERR(priv->event_thread[i].worker);
 			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+			ret = PTR_ERR(priv->event_thread[i].worker);
 			goto err_msm_uninit;
 		}
 
@@ -1057,17 +1058,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
 };
 
-static const struct file_operations fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-	.compat_ioctl = drm_compat_ioctl,
-	.poll = drm_poll,
-	.read = drm_read,
-	.llseek = no_llseek,
-	.mmap = msm_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(fops);
 
 static const struct drm_driver msm_driver = {
 	.driver_features    = DRIVER_GEM |
@@ -1083,7 +1074,7 @@ static const struct drm_driver msm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
-	.gem_prime_mmap     = msm_gem_prime_mmap,
+	.gem_prime_mmap     = drm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init       = msm_debugfs_init,
 #endif
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1a48a709ffb3..8b005d1ac899 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -309,7 +309,6 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -350,7 +349,9 @@ void __exit msm_dsi_unregister(void);
 int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 			 struct drm_encoder *encoder);
 void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
-
+bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
+bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
+bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
 #else
 static inline void __init msm_dsi_register(void)
 {
@@ -367,7 +368,18 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
 static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
 {
 }
-
+static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+	return false;
+}
+static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
+{
+	return false;
+}
+static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
+{
+	return false;
+}
 #endif
 
 #ifdef CONFIG_DRM_MSM_DP
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 67fae60f2fa5..0daaeb54ff6f 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -8,13 +8,12 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_prime.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_kms.h"
 
-extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
-					struct vm_area_struct *vma);
 static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
 
 /*
@@ -48,15 +47,8 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
 	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
 	struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
-	int ret = 0;
 
-	ret = drm_gem_mmap_obj(bo, bo->size, vma);
-	if (ret) {
-		pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
-		return ret;
-	}
-
-	return msm_gem_mmap_obj(bo, vma);
+	return drm_gem_prime_mmap(bo, vma);
 }
 
 static int msm_fbdev_create(struct drm_fb_helper *helper,
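Note: the msm_drv.c and msm_fbdev.c hunks above are one logical change: every mmap path now goes through DRM core helpers instead of the msm-specific wrappers. For reference, DEFINE_DRM_GEM_FOPS(fops) from include/drm/drm_gem.h expands to approximately the following (see the header for the authoritative definition); the functional differences from the deleted open-coded table are only .mmap and .llseek:

/* Approximate expansion of DEFINE_DRM_GEM_FOPS(fops); not patch text. */
static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,	/* was no_llseek */
	.mmap		= drm_gem_mmap,	/* was msm_gem_mmap */
};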
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 5db07fc287ad..22308a1b66fc 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -217,31 +217,6 @@ static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
 	return prot;
 }
 
-int msm_gem_mmap_obj(struct drm_gem_object *obj,
-		struct vm_area_struct *vma)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
-	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
-
-	return 0;
-}
-
-int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	int ret;
-
-	ret = drm_gem_mmap(filp, vma);
-	if (ret) {
-		DBG("mmap failed: %d", ret);
-		return ret;
-	}
-
-	return msm_gem_mmap_obj(vma->vm_private_data, vma);
-}
-
 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -817,8 +792,7 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 		mutex_lock(&priv->mm_lock);
 		if (msm_obj->evictable)
 			mark_unevictable(msm_obj);
-		list_del(&msm_obj->mm_list);
-		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
 		mutex_unlock(&priv->mm_lock);
 	}
 }
@@ -1077,6 +1051,17 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	kfree(msm_obj);
 }
 
+static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
+
+	return 0;
+}
+
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		uint32_t size, uint32_t flags, uint32_t *handle,
@@ -1114,6 +1099,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
 	.get_sg_table = msm_gem_prime_get_sg_table,
 	.vmap = msm_gem_prime_vmap,
 	.vunmap = msm_gem_prime_vunmap,
+	.mmap = msm_gem_object_mmap,
 	.vm_ops = &vm_ops,
 };
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index f9e3ffb2309a..e39a8e7ad843 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -106,9 +106,6 @@ struct msm_gem_object {
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
-int msm_gem_mmap_obj(struct drm_gem_object *obj,
-			struct vm_area_struct *vma);
-int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 9880348a4dc7..fc94e061d6a7 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -39,17 +39,6 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 	msm_gem_put_vaddr(obj);
 }
 
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
-	int ret;
-
-	ret = drm_gem_mmap_obj(obj, obj->size, vma);
-	if (ret < 0)
-		return ret;
-
-	return msm_gem_mmap_obj(vma->vm_private_data, vma);
-}
-
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg)
 {
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9de7c42e1071..de2bc3467bb5 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -117,9 +117,6 @@ struct msm_kms_funcs {
 			struct drm_encoder *encoder,
 			struct drm_encoder *slave_encoder,
 			bool is_cmd_mode);
-	void (*set_encoder_mode)(struct msm_kms *kms,
-				 struct drm_encoder *encoder,
-				 bool cmd_mode);
 	/* cleanup: */
 	void (*destroy)(struct msm_kms *kms);
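Note: taken together, the msm_gem.c, msm_gem.h and msm_gem_prime.c hunks above fold the three former mmap helpers into the single per-object msm_gem_object_mmap(), installed as the .mmap member of msm_gem_object_funcs. A summary of the resulting call paths, written as comments rather than patch text:

/*
 * Resulting mmap call paths after this series (summary, not patch text):
 *
 *   mmap(2) on the drm fd -> fops.mmap = drm_gem_mmap()
 *   fbdev mmap            -> msm_fbdev_mmap() -> drm_gem_prime_mmap()
 *   dma-buf/prime mmap    -> msm_driver.gem_prime_mmap = drm_gem_prime_mmap()
 *
 * All three end with the DRM core invoking
 *
 *   obj->funcs->mmap(obj, vma)      i.e. msm_gem_object_mmap()
 *
 * which applies the msm-specific pieces in one place:
 * VM_MIXEDMAP | VM_DONTEXPAND plus msm_gem_pgprot() (WC vs. cached).
 */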