Diffstat (limited to 'drivers/gpu/drm/msm/adreno')
23 files changed, 886 insertions, 473 deletions
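Most of the churn in the hunks below is a mechanical conversion: the msm-specific `struct msm_gem_address_space` is replaced by the generic `struct drm_gpuvm`, the per-GPU `create_address_space` callbacks become `create_vm`, and accesses to `gpu->aspace->mmu` become `to_msm_vm(gpu->vm)->mmu`. A minimal sketch of the before/after pattern, using only calls that appear in the diff itself (the trailing `true` passed to `msm_gem_vm_create()` is the kernel-managed flag as used by these callers):

```c
/* Old pattern, removed by this series */
static struct msm_gem_address_space *
a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
	struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
	struct msm_gem_address_space *aspace;

	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, 0xfff * SZ_64K);
	if (IS_ERR(aspace) && !IS_ERR(mmu))
		mmu->funcs->destroy(mmu);

	return aspace;
}

/* New pattern: a drm_gpuvm backed by the same msm_mmu */
static struct drm_gpuvm *
a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
	struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
	struct drm_gpuvm *vm;

	vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true);
	if (IS_ERR(vm) && !IS_ERR(mmu))
		mmu->funcs->destroy(mmu);

	return vm;
}
```

The same substitution shows up in the pin/unpin helpers (`msm_gem_unpin_iova(obj, gpu->vm)`, `msm_gem_kernel_put(bo, gpu->vm)`) and in the GMU, which now keeps its own `struct drm_gpuvm *vm` instead of an aspace.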
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 379a3d346c30..ec38db45d8a3 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu) uint32_t *ptr, len; int i, ret; - a2xx_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error); + a2xx_gpummu_params(to_msm_vm(gpu->vm)->mmu, &pt_base, &tran_error); DBG("%s", gpu->name); @@ -466,19 +466,18 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu) return state; } -static struct msm_gem_address_space * -a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) +static struct drm_gpuvm * +a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev) { struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu); - struct msm_gem_address_space *aspace; + struct drm_gpuvm *vm; - aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, - 0xfff * SZ_64K); + vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true); - if (IS_ERR(aspace) && !IS_ERR(mmu)) + if (IS_ERR(vm) && !IS_ERR(mmu)) mmu->funcs->destroy(mmu); - return aspace; + return vm; } static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) @@ -504,7 +503,7 @@ static const struct adreno_gpu_funcs funcs = { #endif .gpu_state_get = a2xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, - .create_address_space = a2xx_create_address_space, + .create_vm = a2xx_create_vm, .get_rptr = a2xx_get_rptr, }, }; @@ -551,14 +550,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev) else adreno_gpu->registers = a220_registers; - if (!gpu->aspace) { - dev_err(dev->dev, "No memory protection without MMU\n"); - if (!allow_vram_carveout) { - ret = -ENXIO; - goto fail; - } - } - return gpu; fail: diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c index 4280f71e472a..0407c9bc8c1b 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c @@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu) } static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova, - struct sg_table *sgt, size_t len, int prot) + struct sg_table *sgt, size_t off, size_t len, + int prot) { struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu); unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; struct sg_dma_page_iter dma_iter; unsigned prot_bits = 0; + WARN_ON(off != 0); + if (prot & IOMMU_WRITE) prot_bits |= 1; if (prot & IOMMU_READ) diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index b6df115bb567..a956cd79195e 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -526,7 +526,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_busy = a3xx_gpu_busy, .gpu_state_get = a3xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, - .create_address_space = adreno_create_address_space, + .create_vm = adreno_create_vm, .get_rptr = a3xx_get_rptr, }, }; @@ -581,21 +581,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) goto fail; } - if (!gpu->aspace) { - /* TODO we think it is possible to configure the GPU to - * restrict access to VRAM carveout. But the required - * registers are unknown. For now just bail out and - * limp along with just modesetting. If it turns out - * to not be possible to restrict access, then we must - * implement a cmdstream validator. 
- */ - DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n"); - if (!allow_vram_carveout) { - ret = -ENXIO; - goto fail; - } - } - icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); if (IS_ERR(icc_path)) { ret = PTR_ERR(icc_path); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index f1b18a6663f7..83f6329accba 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -645,7 +645,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_busy = a4xx_gpu_busy, .gpu_state_get = a4xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, - .create_address_space = adreno_create_address_space, + .create_vm = adreno_create_vm, .get_rptr = a4xx_get_rptr, }, .get_timestamp = a4xx_get_timestamp, @@ -695,21 +695,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull; - if (!gpu->aspace) { - /* TODO we think it is possible to configure the GPU to - * restrict access to VRAM carveout. But the required - * registers are unknown. For now just bail out and - * limp along with just modesetting. If it turns out - * to not be possible to restrict access, then we must - * implement a cmdstream validator. - */ - DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n"); - if (!allow_vram_carveout) { - ret = -ENXIO; - goto fail; - } - } - icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); if (IS_ERR(icc_path)) { ret = PTR_ERR(icc_path); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index 169b8fe688f8..625a4e787d8f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -116,13 +116,13 @@ reset_set(void *data, u64 val) adreno_gpu->fw[ADRENO_FW_PFP] = NULL; if (a5xx_gpu->pm4_bo) { - msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace); + msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm); drm_gem_object_put(a5xx_gpu->pm4_bo); a5xx_gpu->pm4_bo = NULL; } if (a5xx_gpu->pfp_bo) { - msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace); + msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm); drm_gem_object_put(a5xx_gpu->pfp_bo); a5xx_gpu->pfp_bo = NULL; } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 60aef0796236..4a04dc43a8e6 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -622,7 +622,7 @@ static int a5xx_ucode_load(struct msm_gpu *gpu) a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, sizeof(u32) * gpu->nr_rings, MSM_BO_WC | MSM_BO_MAP_PRIV, - gpu->aspace, &a5xx_gpu->shadow_bo, + gpu->vm, &a5xx_gpu->shadow_bo, &a5xx_gpu->shadow_iova); if (IS_ERR(a5xx_gpu->shadow)) @@ -835,8 +835,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); - BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13); - hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; + BUG_ON(adreno_gpu->ubwc_config->highest_bank_bit < 13); + hbb = adreno_gpu->ubwc_config->highest_bank_bit - 13; gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7); gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1); @@ -1042,22 +1042,22 @@ static void a5xx_destroy(struct msm_gpu *gpu) a5xx_preempt_fini(gpu); if (a5xx_gpu->pm4_bo) { - msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace); + msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->vm); drm_gem_object_put(a5xx_gpu->pm4_bo); } if (a5xx_gpu->pfp_bo) { - msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace); + msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->vm); 
drm_gem_object_put(a5xx_gpu->pfp_bo); } if (a5xx_gpu->gpmu_bo) { - msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace); + msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->vm); drm_gem_object_put(a5xx_gpu->gpmu_bo); } if (a5xx_gpu->shadow_bo) { - msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace); + msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->vm); drm_gem_object_put(a5xx_gpu->shadow_bo); } @@ -1457,7 +1457,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu, struct a5xx_crashdumper *dumper) { dumper->ptr = msm_gem_kernel_new(gpu->dev, - SZ_1M, MSM_BO_WC, gpu->aspace, + SZ_1M, MSM_BO_WC, gpu->vm, &dumper->bo, &dumper->iova); if (!IS_ERR(dumper->ptr)) @@ -1557,7 +1557,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu, if (a5xx_crashdumper_run(gpu, &dumper)) { kfree(a5xx_state->hlsqregs); - msm_gem_kernel_put(dumper.bo, gpu->aspace); + msm_gem_kernel_put(dumper.bo, gpu->vm); return; } @@ -1565,7 +1565,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu, memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K), count * sizeof(u32)); - msm_gem_kernel_put(dumper.bo, gpu->aspace); + msm_gem_kernel_put(dumper.bo, gpu->vm); } static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu) @@ -1713,7 +1713,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_busy = a5xx_gpu_busy, .gpu_state_get = a5xx_gpu_state_get, .gpu_state_put = a5xx_gpu_state_put, - .create_address_space = adreno_create_address_space, + .create_vm = adreno_create_vm, .get_rptr = a5xx_get_rptr, }, .get_timestamp = a5xx_get_timestamp, @@ -1756,6 +1756,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; struct adreno_platform_config *config = pdev->dev.platform_data; + const struct qcom_ubwc_cfg_data *common_cfg; struct a5xx_gpu *a5xx_gpu = NULL; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; @@ -1786,21 +1787,20 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } - if (gpu->aspace) - msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler); + msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, + a5xx_fault_handler); /* Set up the preemption specific bits and pieces for each ringbuffer */ a5xx_preempt_init(gpu); - /* Set the highest bank bit */ - if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu)) - adreno_gpu->ubwc_config.highest_bank_bit = 15; - else - adreno_gpu->ubwc_config.highest_bank_bit = 14; + /* Inherit the common config and make some necessary fixups */ + common_cfg = qcom_ubwc_config_get_data(); + if (IS_ERR(common_cfg)) + return ERR_CAST(common_cfg); - /* a5xx only supports UBWC 1.0, these are not configurable */ - adreno_gpu->ubwc_config.macrotile_mode = 0; - adreno_gpu->ubwc_config.ubwc_swizzle = 0x7; + /* Copy the data into the internal struct to drop the const qualifier (temporarily) */ + adreno_gpu->_ubwc_config = *common_cfg; + adreno_gpu->ubwc_config = &adreno_gpu->_ubwc_config; adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 6b91e0bd1514..d6da7351cfbb 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -363,7 +363,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2; ptr = msm_gem_kernel_new(drm, bosize, - MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, 
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova); if (IS_ERR(ptr)) return; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index b5f9d40687d5..e4924b5e1c48 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -255,7 +255,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, ptr = msm_gem_kernel_new(gpu->dev, A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE, - MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova); + MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova); if (IS_ERR(ptr)) return PTR_ERR(ptr); @@ -263,9 +263,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, /* The buffer to store counters needs to be unprivileged */ counters = msm_gem_kernel_new(gpu->dev, A5XX_PREEMPT_COUNTER_SIZE, - MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova); + MSM_BO_WC, gpu->vm, &counters_bo, &counters_iova); if (IS_ERR(counters)) { - msm_gem_kernel_put(bo, gpu->aspace); + msm_gem_kernel_put(bo, gpu->vm); return PTR_ERR(counters); } @@ -296,8 +296,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu) int i; for (i = 0; i < gpu->nr_rings; i++) { - msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace); - msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace); + msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->vm); + msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->vm); } } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index 70f7ad806c34..44df6410bce1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -913,6 +913,11 @@ static const struct adreno_info a6xx_gpus[] = { { /* sentinel */ }, }, }, + .speedbins = ADRENO_SPEEDBINS( + { 0, 0 }, + { 185, 0 }, + { 127, 1 }, + ), }, { .chip_ids = ADRENO_CHIP_IDS( 0x06030001, @@ -1024,6 +1029,11 @@ static const struct adreno_info a6xx_gpus[] = { .gmu_cgc_mode = 0x00020200, .prim_fifo_threshold = 0x00300200, }, + .speedbins = ADRENO_SPEEDBINS( + { 0, 0 }, + { 169, 0 }, + { 113, 1 }, + ), }, { .chip_ids = ADRENO_CHIP_IDS(0x06030500), .family = ADRENO_6XX_GEN4, @@ -1335,7 +1345,7 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = { REG_A6XX_RB_NC_MODE_CNTL, REG_A6XX_RB_CMP_DBG_ECO_CNTL, REG_A7XX_GRAS_NC_MODE_CNTL, - REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, + REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, REG_A6XX_UCHE_GBIF_GX_CONFIG, REG_A6XX_UCHE_CLIENT_PF, REG_A6XX_TPL1_DBG_ECO_CNTL1, @@ -1343,6 +1353,69 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = { DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist); +/* Applicable for X185, A750 */ +static const u32 a750_ifpc_reglist_regs[] = { + REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), + REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(1), + REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2), + REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3), + REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4), + REG_A6XX_TPL1_NC_MODE_CNTL, + REG_A6XX_SP_NC_MODE_CNTL, + REG_A6XX_CP_DBG_ECO_CNTL, + REG_A6XX_CP_PROTECT_CNTL, + REG_A6XX_CP_PROTECT(0), + REG_A6XX_CP_PROTECT(1), + REG_A6XX_CP_PROTECT(2), + REG_A6XX_CP_PROTECT(3), + REG_A6XX_CP_PROTECT(4), + REG_A6XX_CP_PROTECT(5), + REG_A6XX_CP_PROTECT(6), + REG_A6XX_CP_PROTECT(7), + REG_A6XX_CP_PROTECT(8), + REG_A6XX_CP_PROTECT(9), + REG_A6XX_CP_PROTECT(10), + REG_A6XX_CP_PROTECT(11), + REG_A6XX_CP_PROTECT(12), + REG_A6XX_CP_PROTECT(13), + REG_A6XX_CP_PROTECT(14), + REG_A6XX_CP_PROTECT(15), + REG_A6XX_CP_PROTECT(16), + REG_A6XX_CP_PROTECT(17), + REG_A6XX_CP_PROTECT(18), + 
REG_A6XX_CP_PROTECT(19), + REG_A6XX_CP_PROTECT(20), + REG_A6XX_CP_PROTECT(21), + REG_A6XX_CP_PROTECT(22), + REG_A6XX_CP_PROTECT(23), + REG_A6XX_CP_PROTECT(24), + REG_A6XX_CP_PROTECT(25), + REG_A6XX_CP_PROTECT(26), + REG_A6XX_CP_PROTECT(27), + REG_A6XX_CP_PROTECT(28), + REG_A6XX_CP_PROTECT(29), + REG_A6XX_CP_PROTECT(30), + REG_A6XX_CP_PROTECT(31), + REG_A6XX_CP_PROTECT(32), + REG_A6XX_CP_PROTECT(33), + REG_A6XX_CP_PROTECT(34), + REG_A6XX_CP_PROTECT(35), + REG_A6XX_CP_PROTECT(36), + REG_A6XX_CP_PROTECT(37), + REG_A6XX_CP_PROTECT(38), + REG_A6XX_CP_PROTECT(39), + REG_A6XX_CP_PROTECT(40), + REG_A6XX_CP_PROTECT(41), + REG_A6XX_CP_PROTECT(42), + REG_A6XX_CP_PROTECT(43), + REG_A6XX_CP_PROTECT(44), + REG_A6XX_CP_PROTECT(45), + REG_A6XX_CP_PROTECT(46), + REG_A6XX_CP_PROTECT(47), +}; + +DECLARE_ADRENO_REGLIST_LIST(a750_ifpc_reglist); + static const struct adreno_info a7xx_gpus[] = { { .chip_ids = ADRENO_CHIP_IDS(0x07000200), @@ -1432,16 +1505,36 @@ static const struct adreno_info a7xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV | - ADRENO_QUIRK_PREEMPTION, + ADRENO_QUIRK_PREEMPTION | + ADRENO_QUIRK_IFPC, .init = a6xx_gpu_init, .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, .pwrup_reglist = &a7xx_pwrup_reglist, + .ifpc_reglist = &a750_ifpc_reglist, .gmu_chipid = 0x7050001, .gmu_cgc_mode = 0x00020202, + .bcms = (const struct a6xx_bcm[]) { + { .name = "SH0", .buswidth = 16 }, + { .name = "MC0", .buswidth = 4 }, + { + .name = "ACV", + .fixed = true, + .perfmode = BIT(3), + .perfmode_bw = 16500000, + }, + { /* sentinel */ }, + }, }, .preempt_record_size = 4192 * SZ_1K, + .speedbins = ADRENO_SPEEDBINS( + { 0, 0 }, + { 59, 1 }, + { 7, 2 }, + { 232, 3 }, + { 146, 4 }, + ), }, { .chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */ .family = ADRENO_7XX_GEN3, @@ -1453,12 +1546,14 @@ static const struct adreno_info a7xx_gpus[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | ADRENO_QUIRK_HAS_HW_APRIV | - ADRENO_QUIRK_PREEMPTION, + ADRENO_QUIRK_PREEMPTION | + ADRENO_QUIRK_IFPC, .init = a6xx_gpu_init, .zapfw = "gen70900_zap.mbn", .a6xx = &(const struct a6xx_info) { .protect = &a730_protect, .pwrup_reglist = &a7xx_pwrup_reglist, + .ifpc_reglist = &a750_ifpc_reglist, .gmu_chipid = 0x7090100, .gmu_cgc_mode = 0x00020202, .bcms = (const struct a6xx_bcm[]) { @@ -1474,6 +1569,45 @@ static const struct adreno_info a7xx_gpus[] = { }, }, .preempt_record_size = 3572 * SZ_1K, + }, { + .chip_ids = ADRENO_CHIP_IDS(0x43030c00), + .family = ADRENO_7XX_GEN2, + .fw = { + [ADRENO_FW_SQE] = "gen71500_sqe.fw", + [ADRENO_FW_GMU] = "gen71500_gmu.bin", + }, + .gmem = SZ_1M + SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, + .init = a6xx_gpu_init, + .a6xx = &(const struct a6xx_info) { + .hwcg = a740_hwcg, + .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, + .gmu_chipid = 0x70f0000, + .gmu_cgc_mode = 0x00020222, + .bcms = (const struct a6xx_bcm[]) { + { .name = "SH0", .buswidth = 16 }, + { .name = "MC0", .buswidth = 4 }, + { + .name = "ACV", + .fixed = true, + .perfmode = BIT(3), + .perfmode_bw = 16500000, + }, + { /* sentinel */ }, + }, + }, + .preempt_record_size = 4192 * SZ_1K, + .speedbins = ADRENO_SPEEDBINS( + { 0, 0 }, + { 294, 1 }, + { 263, 2 }, + { 233, 3 }, + { 141, 4 }, + ), } }; DECLARE_ADRENO_GPULIST(a7xx); diff --git 
a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 38c0f8ef85c3..fc62fef2fed8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -93,14 +93,25 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) /* Check to see if the GX rail is still powered */ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; u32 val; /* This can be called from gpu state code so make sure GMU is valid */ if (!gmu->initialized) return false; + /* If GMU is absent, then GX power domain is ON as long as GPU is in active state */ + if (adreno_has_gmu_wrapper(adreno_gpu)) + return true; + val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); + if (adreno_is_a7xx(adreno_gpu)) + return !(val & + (A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | + A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); + return !(val & (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); @@ -272,6 +283,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) if (ret) DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); + set_bit(GMU_STATUS_FW_START, &gmu->status); + return ret; } @@ -403,7 +416,10 @@ int a6xx_sptprac_enable(struct a6xx_gmu *gmu) int ret; u32 val; - if (!gmu->legacy) + WARN_ON(!gmu->legacy); + + /* Nothing to do if GMU does the power management */ + if (gmu->idle_level > GMU_IDLE_STATE_ACTIVE) return 0; gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); @@ -518,6 +534,9 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu) int ret; u32 val; + if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status)) + return 0; + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1)); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, @@ -545,6 +564,9 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) int ret; u32 val; + if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status)) + return; + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, @@ -553,6 +575,8 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); + + set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status); } static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) @@ -681,8 +705,6 @@ setup_pdc: /* ensure no writes happen before the uCode is fully written */ wmb(); - a6xx_rpmh_stop(gmu); - err: if (!IS_ERR_OR_NULL(pdcptr)) iounmap(pdcptr); @@ -842,19 +864,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) else gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); - if (state == GMU_WARM_BOOT) { - ret = a6xx_rpmh_start(gmu); - if (ret) - return ret; - } else { + ret = a6xx_rpmh_start(gmu); + if (ret) + return ret; + + if (state == GMU_COLD_BOOT) { if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], "GMU firmware is not loaded\n")) return -ENOENT; - ret = a6xx_rpmh_start(gmu); - if (ret) - return ret; - ret = a6xx_gmu_fw_load(gmu); if (ret) return ret; @@ -925,10 +943,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) ret = a6xx_gmu_gfx_rail_on(gmu); if (ret) return ret; - } - /* Enable SPTP_PC if the CPU is responsible for it */ - if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { ret = a6xx_sptprac_enable(gmu); if (ret) return ret; @@ -980,6 +995,22 @@ static void a6xx_gmu_rpmh_off(struct 
a6xx_gmu *gmu) val, (val & 1), 100, 10000); gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off, val, (val & 1), 100, 1000); + + if (!adreno_is_a740_family(adreno_gpu)) + return; + + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS5_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS6_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS7_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 1000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS8_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS9_DRV0_STATUS + seqmem_off, + val, (val & 1), 100, 1000); } /* Force the GMU off in case it isn't responsive */ @@ -1023,6 +1054,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) /* Reset GPU core blocks */ a6xx_gpu_sw_reset(gpu, true); + + a6xx_rpmh_stop(gmu); } static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) @@ -1128,6 +1161,11 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) /* Set the GPU to the current freq */ a6xx_gmu_set_initial_freq(gpu, gmu); + if (refcount_read(&gpu->sysprof_active) > 1) { + ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + if (!ret) + set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status); + } out: /* On failure, shut down the GMU to leave it in a good state */ if (ret) { @@ -1175,6 +1213,9 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); } + if (test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) + a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + ret = a6xx_gmu_wait_for_idle(gmu); /* If the GMU isn't responding assume it is hung */ @@ -1259,15 +1300,17 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) { - msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace); - msm_gem_kernel_put(gmu->debug.obj, gmu->aspace); - msm_gem_kernel_put(gmu->icache.obj, gmu->aspace); - msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace); - msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace); - msm_gem_kernel_put(gmu->log.obj, gmu->aspace); - - gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); - msm_gem_address_space_put(gmu->aspace); + struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu; + + msm_gem_kernel_put(gmu->hfi.obj, gmu->vm); + msm_gem_kernel_put(gmu->debug.obj, gmu->vm); + msm_gem_kernel_put(gmu->icache.obj, gmu->vm); + msm_gem_kernel_put(gmu->dcache.obj, gmu->vm); + msm_gem_kernel_put(gmu->dummy.obj, gmu->vm); + msm_gem_kernel_put(gmu->log.obj, gmu->vm); + + mmu->funcs->detach(mmu); + drm_gpuvm_put(gmu->vm); } static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, @@ -1296,7 +1339,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, if (IS_ERR(bo->obj)) return PTR_ERR(bo->obj); - ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, + ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova, range_start, range_end); if (ret) { drm_gem_object_put(bo->obj); @@ -1311,19 +1354,17 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, return 0; } -static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) +static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu) { struct msm_mmu *mmu; mmu = msm_iommu_new(gmu->dev, 0); - if (!mmu) - return -ENODEV; if (IS_ERR(mmu)) 
return PTR_ERR(mmu); - gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); - if (IS_ERR(gmu->aspace)) - return PTR_ERR(gmu->aspace); + gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true); + if (IS_ERR(gmu->vm)) + return PTR_ERR(gmu->vm); return 0; } @@ -1690,6 +1731,7 @@ static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu) u32 val; freq = gmu->gpu_freqs[i]; + /* This is unlikely to fail because we are passing back a known freq */ opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true); np = dev_pm_opp_get_of_node(opp); @@ -1788,6 +1830,35 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, return irq; } +void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + unsigned int sysprof_active; + + /* Nothing to do if GPU is suspended. We will handle this during GMU resume */ + if (!pm_runtime_get_if_active(&gpu->pdev->dev)) + return; + + mutex_lock(&gmu->lock); + + sysprof_active = refcount_read(&gpu->sysprof_active); + + /* + * 'Perfcounter select' register values are lost during IFPC collapse. To avoid that, + * use the currently unused perfcounter oob vote to block IFPC when sysprof is active + */ + if ((sysprof_active > 1) && !test_and_set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) + a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + else if ((sysprof_active == 1) && test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) + a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET); + + mutex_unlock(&gmu->lock); + + pm_runtime_put(&gpu->pdev->dev); +} + void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; @@ -1930,8 +2001,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (ret) return ret; - /* Fow now, don't do anything fancy until we get our feet under us */ - gmu->idle_level = GMU_IDLE_STATE_ACTIVE; + /* Set GMU idle level */ + gmu->idle_level = (adreno_gpu->info->quirks & ADRENO_QUIRK_IFPC) ? + GMU_IDLE_STATE_IFPC : GMU_IDLE_STATE_ACTIVE; pm_runtime_enable(gmu->dev); @@ -1940,7 +2012,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (ret) goto err_put_device; - ret = a6xx_gmu_memory_probe(gmu); + ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu); if (ret) goto err_put_device; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index b2d4489b4024..06cfc294016f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -50,6 +50,9 @@ struct a6xx_bcm { /* The GMU does not do any idle state management */ #define GMU_IDLE_STATE_ACTIVE 0 +/* Unknown power state. Not exposed by the firmware. For documentation purpose only */ +#define GMU_IDLE_STATE_RESERVED 1 + /* The GMU manages SPTP power collapse */ #define GMU_IDLE_STATE_SPTP 2 @@ -62,7 +65,7 @@ struct a6xx_gmu { /* For serializing communication with the GMU: */ struct mutex lock; - struct msm_gem_address_space *aspace; + struct drm_gpuvm *vm; void __iomem *mmio; void __iomem *rscc; @@ -117,6 +120,14 @@ struct a6xx_gmu { struct qmp *qmp; struct a6xx_hfi_msg_bw_table *bw_table; + +/* To check if we can trigger sleep seq at PDC. 
Cleared in a6xx_rpmh_stop() */ +#define GMU_STATUS_FW_START 0 +/* To track if PDC sleep seq was done */ +#define GMU_STATUS_PDC_SLEEP 1 +/* To track Perfcounter OOB set status */ +#define GMU_STATUS_OOB_PERF_SET 2 + unsigned long status; }; static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) @@ -158,6 +169,9 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi) #define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \ readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \ interval, timeout) +#define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \ + readl_poll_timeout_atomic((gmu)->mmio + ((addr) << 2), val, cond, \ + interval, timeout) static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset) { diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 491fde0083a2..b8f8ae940b55 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -16,6 +16,97 @@ #define GPU_PAS_ID 13 +static u64 read_gmu_ao_counter(struct a6xx_gpu *a6xx_gpu) +{ + u64 count_hi, count_lo, temp; + + do { + count_hi = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H); + count_lo = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L); + temp = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H); + } while (unlikely(count_hi != temp)); + + return (count_hi << 32) | count_lo; +} + +static bool fence_status_check(struct msm_gpu *gpu, u32 offset, u32 value, u32 status, u32 mask) +{ + /* Success if !writedropped0/1 */ + if (!(status & mask)) + return true; + + udelay(10); + + /* Try to update fenced register again */ + gpu_write(gpu, offset, value); + + /* We can't do a posted write here because the power domain could be + * in collapse state. So use the heaviest barrier instead + */ + mb(); + return false; +} + +static int fenced_write(struct a6xx_gpu *a6xx_gpu, u32 offset, u32 value, u32 mask) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + u32 status; + + gpu_write(gpu, offset, value); + + /* Nothing else to be done in the case of no-GMU */ + if (adreno_has_gmu_wrapper(adreno_gpu)) + return 0; + + /* We can't do a posted write here because the power domain could be + * in collapse state. 
So use the heaviest barrier instead + */ + mb(); + + if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status, + fence_status_check(gpu, offset, value, status, mask), 0, 1000)) + return 0; + + /* Try again for another 1ms before failing */ + gpu_write(gpu, offset, value); + mb(); + + if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status, + fence_status_check(gpu, offset, value, status, mask), 0, 1000)) { + /* + * The 'delay' warning is here because the pause to print this + * warning will allow gpu to move to power collapse which + * defeats the purpose of continuous polling for 2 ms + */ + dev_err_ratelimited(gmu->dev, "delay in fenced register write (0x%x)\n", + offset); + return 0; + } + + dev_err_ratelimited(gmu->dev, "fenced register write (0x%x) fail\n", + offset); + + return -ETIMEDOUT; +} + +int a6xx_fenced_write(struct a6xx_gpu *a6xx_gpu, u32 offset, u64 value, u32 mask, bool is_64b) +{ + int ret; + + ret = fenced_write(a6xx_gpu, offset, lower_32_bits(value), mask); + if (ret) + return ret; + + if (!is_64b) + return 0; + + ret = fenced_write(a6xx_gpu, offset + 1, upper_32_bits(value), mask); + + return ret; +} + static inline bool _a6xx_check_idle(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -86,7 +177,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Update HW if this is the current ring and we are not in preempt*/ if (!a6xx_in_preempt(a6xx_gpu)) { if (a6xx_gpu->cur_ring == ring) - gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false); else ring->restore_wptr = true; } else { @@ -111,7 +202,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, struct msm_ringbuffer *ring, struct msm_gem_submit *submit) { bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; - struct msm_file_private *ctx = submit->queue->ctx; + struct msm_context *ctx = submit->queue->ctx; + struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; phys_addr_t ttbr; u32 asid; @@ -120,7 +212,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, if (ctx->seqno == ring->cur_ctx_seqno) return; - if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) + if (msm_iommu_pagetable_params(to_msm_vm(vm)->mmu, &ttbr, &asid)) return; if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) { @@ -172,8 +264,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, * Needed for preemption */ OUT_PKT7(ring, CP_MEM_WRITE, 5); - OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr))); - OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr))); + OUT_RING(ring, A5XX_CP_MEM_WRITE_ADDR_LO(lower_32_bits(memptr))); + OUT_RING(ring, A5XX_CP_MEM_WRITE_ADDR_HI(upper_32_bits(memptr))); OUT_RING(ring, lower_32_bits(ttbr)); OUT_RING(ring, upper_32_bits(ttbr)); OUT_RING(ring, ctx->seqno); @@ -203,9 +295,9 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, */ OUT_PKT7(ring, CP_WAIT_REG_MEM, 6); OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ)); - OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO( + OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO( REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS)); - OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0)); + OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_HI(0)); OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1)); OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1)); OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0)); @@ -297,8 +389,7 @@ static void a6xx_submit(struct msm_gpu *gpu, 
struct msm_gem_submit *submit) OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, submit->seqno); - trace_msm_gpu_submit_flush(submit, - gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); + trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu)); a6xx_flush(gpu, ring); } @@ -498,8 +589,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) } - trace_msm_gpu_submit_flush(submit, - gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); + trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu)); a6xx_flush(gpu, ring); @@ -603,117 +693,118 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu) gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1), protect->regs[i]); } -static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) +static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu) { - gpu->ubwc_config.rgb565_predicator = 0; - gpu->ubwc_config.uavflagprd_inv = 0; - gpu->ubwc_config.min_acc_len = 0; - gpu->ubwc_config.ubwc_swizzle = 0x6; - gpu->ubwc_config.macrotile_mode = 0; - gpu->ubwc_config.highest_bank_bit = 15; + const struct qcom_ubwc_cfg_data *common_cfg; + struct qcom_ubwc_cfg_data *cfg = &gpu->_ubwc_config; + + /* Inherit the common config and make some necessary fixups */ + common_cfg = qcom_ubwc_config_get_data(); + if (IS_ERR(common_cfg)) + return PTR_ERR(common_cfg); + + /* Copy the data into the internal struct to drop the const qualifier (temporarily) */ + *cfg = *common_cfg; + + cfg->ubwc_swizzle = 0x6; + cfg->highest_bank_bit = 15; if (adreno_is_a610(gpu)) { - gpu->ubwc_config.highest_bank_bit = 13; - gpu->ubwc_config.min_acc_len = 1; - gpu->ubwc_config.ubwc_swizzle = 0x7; + cfg->highest_bank_bit = 13; + cfg->ubwc_swizzle = 0x7; } if (adreno_is_a618(gpu)) - gpu->ubwc_config.highest_bank_bit = 14; + cfg->highest_bank_bit = 14; if (adreno_is_a619(gpu)) /* TODO: Should be 14 but causes corruption at e.g. 
1920x1200 on DP */ - gpu->ubwc_config.highest_bank_bit = 13; + cfg->highest_bank_bit = 13; if (adreno_is_a619_holi(gpu)) - gpu->ubwc_config.highest_bank_bit = 13; - - if (adreno_is_a621(gpu)) { - gpu->ubwc_config.highest_bank_bit = 13; - gpu->ubwc_config.amsbc = 1; - gpu->ubwc_config.uavflagprd_inv = 2; - } - - if (adreno_is_a623(gpu)) { - gpu->ubwc_config.highest_bank_bit = 16; - gpu->ubwc_config.amsbc = 1; - gpu->ubwc_config.rgb565_predicator = 1; - gpu->ubwc_config.uavflagprd_inv = 2; - gpu->ubwc_config.macrotile_mode = 1; - } + cfg->highest_bank_bit = 13; - if (adreno_is_a640_family(gpu)) - gpu->ubwc_config.amsbc = 1; + if (adreno_is_a621(gpu)) + cfg->highest_bank_bit = 13; - if (adreno_is_a680(gpu)) - gpu->ubwc_config.macrotile_mode = 1; + if (adreno_is_a623(gpu)) + cfg->highest_bank_bit = 16; if (adreno_is_a650(gpu) || adreno_is_a660(gpu) || adreno_is_a690(gpu) || adreno_is_a730(gpu) || adreno_is_a740_family(gpu)) { - /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */ - gpu->ubwc_config.highest_bank_bit = 16; - gpu->ubwc_config.amsbc = 1; - gpu->ubwc_config.rgb565_predicator = 1; - gpu->ubwc_config.uavflagprd_inv = 2; - gpu->ubwc_config.macrotile_mode = 1; + /* TODO: get ddr type from bootloader and use 15 for LPDDR4 */ + cfg->highest_bank_bit = 16; } if (adreno_is_a663(gpu)) { - gpu->ubwc_config.highest_bank_bit = 13; - gpu->ubwc_config.amsbc = 1; - gpu->ubwc_config.rgb565_predicator = 1; - gpu->ubwc_config.uavflagprd_inv = 2; - gpu->ubwc_config.macrotile_mode = 1; - gpu->ubwc_config.ubwc_swizzle = 0x4; + cfg->highest_bank_bit = 13; + cfg->ubwc_swizzle = 0x4; } - if (adreno_is_7c3(gpu)) { - gpu->ubwc_config.highest_bank_bit = 14; - gpu->ubwc_config.amsbc = 1; - gpu->ubwc_config.uavflagprd_inv = 2; - gpu->ubwc_config.macrotile_mode = 1; - } + if (adreno_is_7c3(gpu)) + cfg->highest_bank_bit = 14; - if (adreno_is_a702(gpu)) { - gpu->ubwc_config.highest_bank_bit = 14; - gpu->ubwc_config.min_acc_len = 1; - } + if (adreno_is_a702(gpu)) + cfg->highest_bank_bit = 14; + + if (cfg->highest_bank_bit != common_cfg->highest_bank_bit) + DRM_WARN_ONCE("Inconclusive highest_bank_bit value: %u (GPU) vs %u (UBWC_CFG)\n", + cfg->highest_bank_bit, common_cfg->highest_bank_bit); + + if (cfg->ubwc_swizzle != common_cfg->ubwc_swizzle) + DRM_WARN_ONCE("Inconclusive ubwc_swizzle value: %u (GPU) vs %u (UBWC_CFG)\n", + cfg->ubwc_swizzle, common_cfg->ubwc_swizzle); + + gpu->ubwc_config = &gpu->_ubwc_config; + + return 0; } static void a6xx_set_ubwc_config(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config; /* * We subtract 13 from the highest bank bit (13 is the minimum value * allowed by hw) and write the lowest two bits of the remaining value * as hbb_lo and the one above it as hbb_hi to the hardware. 
*/ - BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13); - u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; + BUG_ON(cfg->highest_bank_bit < 13); + u32 hbb = cfg->highest_bank_bit - 13; + bool rgb565_predicator = cfg->ubwc_enc_version >= UBWC_4_0; + u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2); + bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg); + bool amsbc = cfg->ubwc_enc_version >= UBWC_3_0; + bool min_acc_len_64b = false; + u8 uavflagprd_inv = 0; u32 hbb_hi = hbb >> 2; u32 hbb_lo = hbb & 3; - u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1; - u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2); + + if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) + uavflagprd_inv = 2; + + if (adreno_is_a610(adreno_gpu) || adreno_is_a702(adreno_gpu)) + min_acc_len_64b = true; gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, level2_swizzling_dis << 12 | - adreno_gpu->ubwc_config.rgb565_predicator << 11 | - hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 | - adreno_gpu->ubwc_config.min_acc_len << 3 | + rgb565_predicator << 11 | + hbb_hi << 10 | amsbc << 4 | + min_acc_len_64b << 3 | hbb_lo << 1 | ubwc_mode); gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, level2_swizzling_dis << 6 | hbb_hi << 4 | - adreno_gpu->ubwc_config.min_acc_len << 3 | + min_acc_len_64b << 3 | hbb_lo << 1 | ubwc_mode); gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, level2_swizzling_dis << 12 | hbb_hi << 10 | - adreno_gpu->ubwc_config.uavflagprd_inv << 4 | - adreno_gpu->ubwc_config.min_acc_len << 3 | + uavflagprd_inv << 4 | + min_acc_len_64b << 3 | hbb_lo << 1 | ubwc_mode); if (adreno_is_a7xx(adreno_gpu)) @@ -721,10 +812,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu) FIELD_PREP(GENMASK(8, 5), hbb_lo)); gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, - adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21); + min_acc_len_64b << 23 | hbb_lo << 21); gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL, - adreno_gpu->ubwc_config.macrotile_mode); + cfg->macrotile_mode); } static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu) @@ -737,11 +828,10 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu) u32 *dest = (u32 *)&lock->regs[0]; int i; - reglist = adreno_gpu->info->a6xx->pwrup_reglist; - lock->gpu_req = lock->cpu_req = lock->turn = 0; - lock->ifpc_list_len = 0; - lock->preemption_list_len = reglist->count; + + reglist = adreno_gpu->info->a6xx->ifpc_reglist; + lock->ifpc_list_len = reglist->count; /* * For each entry in each of the lists, write the offset and the current @@ -752,6 +842,14 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu) *dest++ = gpu_read(gpu, reglist->regs[i]); } + reglist = adreno_gpu->info->a6xx->pwrup_reglist; + lock->preemption_list_len = reglist->count; + + for (i = 0; i < reglist->count; i++) { + *dest++ = reglist->regs[i]; + *dest++ = gpu_read(gpu, reglist->regs[i]); + } + /* * The overall register list is composed of * 1. 
Static IFPC-only registers @@ -970,7 +1068,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu) msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) { - msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); + msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm); drm_gem_object_put(a6xx_gpu->sqe_bo); a6xx_gpu->sqe_bo = NULL; @@ -987,7 +1085,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu) a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, sizeof(u32) * gpu->nr_rings, MSM_BO_WC | MSM_BO_MAP_PRIV, - gpu->aspace, &a6xx_gpu->shadow_bo, + gpu->vm, &a6xx_gpu->shadow_bo, &a6xx_gpu->shadow_iova); if (IS_ERR(a6xx_gpu->shadow)) @@ -998,7 +1096,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu) a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, MSM_BO_WC | MSM_BO_MAP_PRIV, - gpu->aspace, &a6xx_gpu->pwrup_reglist_bo, + gpu->vm, &a6xx_gpu->pwrup_reglist_bo, &a6xx_gpu->pwrup_reglist_iova); if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr)) @@ -1239,14 +1337,14 @@ static int hw_init(struct msm_gpu *gpu) /* Set weights for bicubic filtering */ if (adreno_is_a650_family(adreno_gpu) || adreno_is_x185(adreno_gpu)) { - gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0); - gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1, + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), 0); + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(1), 0x3fe05ff4); - gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2, + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2), 0x3fa0ebee); - gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3, + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3), 0x3f5193ed); - gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4, + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4), 0x3f0243f0); } @@ -1446,21 +1544,25 @@ static void a6xx_recover(struct msm_gpu *gpu) adreno_dump_info(gpu); - for (i = 0; i < 8; i++) - DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, - gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i))); + if (a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) { + /* Sometimes crashstate capture is skipped, so SQE should be halted here again */ + gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); + + for (i = 0; i < 8; i++) + DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i))); + + if (hang_debug) + a6xx_dump(gpu); - if (hang_debug) - a6xx_dump(gpu); + } /* * To handle recovery specific sequences during the rpm suspend we are * about to trigger */ - a6xx_gpu->hung = true; - /* Halt SQE first */ - gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); + a6xx_gpu->hung = true; pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); @@ -1691,8 +1793,6 @@ static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu) static void a6xx_fault_detect_irq(struct msm_gpu *gpu) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); /* @@ -1704,13 +1804,6 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu) if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT) return; - /* - * Force the GPU to stay on until after we finish - * collecting information - */ - if (!adreno_has_gmu_wrapper(adreno_gpu)) - gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1); - DRM_DEV_ERROR(&gpu->pdev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", ring ? ring->id : -1, ring ? 
ring->fctx->last_fence : 0, @@ -1725,6 +1818,9 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu) /* Turn off the hangcheck timer to keep it from bothering us */ timer_delete(&gpu->hangcheck_timer); + /* Turn off interrupts to avoid triggering recovery again */ + gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, 0); + kthread_queue_work(gpu->worker, &gpu->recover_work); } @@ -1749,9 +1845,49 @@ static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu) } } +static void a6xx_gpu_keepalive_vote(struct msm_gpu *gpu, bool on) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + if (adreno_has_gmu_wrapper(adreno_gpu)) + return; + + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, on); +} + +static int irq_poll_fence(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + u32 status; + + if (adreno_has_gmu_wrapper(adreno_gpu)) + return 0; + + if (gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, status, !status, 1, 100)) { + u32 rbbm_unmasked = gmu_read(gmu, REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS); + + dev_err_ratelimited(&gpu->pdev->dev, + "irq fence poll timeout, fence_ctrl=0x%x, unmasked_status=0x%x\n", + status, rbbm_unmasked); + return -ETIMEDOUT; + } + + return 0; +} + static irqreturn_t a6xx_irq(struct msm_gpu *gpu) { struct msm_drm_private *priv = gpu->dev->dev_private; + + /* Set keepalive vote to avoid power collapse after RBBM_INT_0_STATUS is read */ + a6xx_gpu_keepalive_vote(gpu, true); + + if (irq_poll_fence(gpu)) + goto done; + u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS); gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status); @@ -1788,6 +1924,9 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu) if (status & A6XX_RBBM_INT_0_MASK_CP_SW) a6xx_preempt_irq(gpu); +done: + a6xx_gpu_keepalive_vote(gpu, false); + return IRQ_HANDLED; } @@ -2177,16 +2316,7 @@ static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - mutex_lock(&a6xx_gpu->gmu.lock); - - /* Force the GPU power on so we can read this register */ - a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); - - *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); - - a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); - - mutex_unlock(&a6xx_gpu->gmu.lock); + *value = read_gmu_ao_counter(a6xx_gpu); return 0; } @@ -2211,12 +2341,12 @@ static void a6xx_destroy(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); if (a6xx_gpu->sqe_bo) { - msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); + msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->vm); drm_gem_object_put(a6xx_gpu->sqe_bo); } if (a6xx_gpu->shadow_bo) { - msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); + msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->vm); drm_gem_object_put(a6xx_gpu->shadow_bo); } @@ -2256,8 +2386,8 @@ static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, mutex_unlock(&a6xx_gpu->gmu.lock); } -static struct msm_gem_address_space * -a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) +static struct drm_gpuvm * +a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); @@ -2271,22 +2401,21 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct 
platform_device *pdev) !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY)) quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; - return adreno_iommu_create_address_space(gpu, pdev, quirks); + return adreno_iommu_create_vm(gpu, pdev, quirks); } -static struct msm_gem_address_space * -a6xx_create_private_address_space(struct msm_gpu *gpu) +static struct drm_gpuvm * +a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed) { struct msm_mmu *mmu; - mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); + mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed); if (IS_ERR(mmu)) return ERR_CAST(mmu); - return msm_gem_address_space_create(mmu, - "gpu", ADRENO_VM_START, - adreno_private_address_space_size(gpu)); + return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START, + adreno_private_vm_size(gpu), kernel_managed); } static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) @@ -2297,18 +2426,36 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) return a6xx_gpu->shadow[ring->id]; + /* + * This is true only on an A6XX_GEN1 with GMU, has IFPC enabled and a super old SQE firmware + * without 'whereami' support + */ + WARN_ONCE((to_adreno_gpu(gpu)->info->quirks & ADRENO_QUIRK_IFPC), + "Can't read CP_RB_RPTR register reliably\n"); + return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); } static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { - struct msm_cp_state cp_state = { + struct msm_cp_state cp_state; + bool progress; + + /* + * With IFPC, KMD doesn't know whether GX power domain is collapsed + * or not. So, we can't blindly read the below registers in GX domain. + * Lets trust the hang detection in HW and lie to the caller that + * there was progress. 
+ */ + if (to_adreno_gpu(gpu)->info->quirks & ADRENO_QUIRK_IFPC) + return true; + + cp_state = (struct msm_cp_state) { .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE), }; - bool progress; /* * Adjust the remaining data to account for what has already been @@ -2403,10 +2550,11 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, #endif - .create_address_space = a6xx_create_address_space, - .create_private_address_space = a6xx_create_private_address_space, + .create_vm = a6xx_create_vm, + .create_private_vm = a6xx_create_private_vm, .get_rptr = a6xx_get_rptr, .progress = a6xx_progress, + .sysprof_setup = a6xx_gmu_sysprof_setup, }, .get_timestamp = a6xx_gmu_get_timestamp, }; @@ -2432,8 +2580,8 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = { .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, #endif - .create_address_space = a6xx_create_address_space, - .create_private_address_space = a6xx_create_private_address_space, + .create_vm = a6xx_create_vm, + .create_private_vm = a6xx_create_private_vm, .get_rptr = a6xx_get_rptr, .progress = a6xx_progress, }, @@ -2463,10 +2611,11 @@ static const struct adreno_gpu_funcs funcs_a7xx = { .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, #endif - .create_address_space = a6xx_create_address_space, - .create_private_address_space = a6xx_create_private_address_space, + .create_vm = a6xx_create_vm, + .create_private_vm = a6xx_create_private_vm, .get_rptr = a6xx_get_rptr, .progress = a6xx_progress, + .sysprof_setup = a6xx_gmu_sysprof_setup, }, .get_timestamp = a6xx_gmu_get_timestamp, }; @@ -2560,11 +2709,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) adreno_gpu->uche_trap_base = 0x1fffffffff000ull; - if (gpu->aspace) - msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, - a6xx_fault_handler); + msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, + a6xx_fault_handler); + + ret = a6xx_calc_ubwc_config(adreno_gpu); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } - a6xx_calc_ubwc_config(adreno_gpu); /* Set up the preemption specific bits and pieces for each ringbuffer */ a6xx_preempt_init(gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 9201a53dd341..0b17d36c36a9 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -6,6 +6,10 @@ #include "adreno_gpu.h" +#include "a6xx_enums.xml.h" +#include "a7xx_enums.xml.h" +#include "a6xx_perfcntrs.xml.h" +#include "a7xx_perfcntrs.xml.h" #include "a6xx.xml.h" #include "a6xx_gmu.h" @@ -41,6 +45,7 @@ struct a6xx_info { const struct adreno_reglist *hwcg; const struct adreno_protect *protect; const struct adreno_reglist_list *pwrup_reglist; + const struct adreno_reglist_list *ifpc_reglist; u32 gmu_chipid; u32 gmu_cgc_mode; u32 prim_fifo_threshold; @@ -250,6 +255,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu); +void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu); void a6xx_preempt_init(struct msm_gpu *gpu); void a6xx_preempt_hw_init(struct msm_gpu *gpu); @@ -291,5 +297,6 @@ int 
a6xx_gpu_state_put(struct msm_gpu_state *state); void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off); void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert); +int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b); #endif /* __A6XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 341a72a67401..4c7f3c642f6a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -11,7 +11,7 @@ static const unsigned int *gen7_0_0_external_core_regs[] __always_unused; static const unsigned int *gen7_2_0_external_core_regs[] __always_unused; static const unsigned int *gen7_9_0_external_core_regs[] __always_unused; -static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused; +static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused; static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused; #include "adreno_gen7_0_0_snapshot.h" @@ -132,7 +132,7 @@ static int a6xx_crashdumper_init(struct msm_gpu *gpu, struct a6xx_crashdumper *dumper) { dumper->ptr = msm_gem_kernel_new(gpu->dev, - SZ_1M, MSM_BO_WC, gpu->aspace, + SZ_1M, MSM_BO_WC, gpu->vm, &dumper->bo, &dumper->iova); if (!IS_ERR(dumper->ptr)) @@ -158,7 +158,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu, /* Make sure all pending memory writes are posted */ wmb(); - gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova); + gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova); gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1); @@ -174,8 +174,15 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu, static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset, u32 *data) { - u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) | - A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block); + u32 reg; + + if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) { + reg = A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) | + A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block); + } else { + reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) | + A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block); + } gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg); gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg); @@ -198,11 +205,18 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset, readl((ptr) + ((offset) << 2)) /* read a value from the CX debug bus */ -static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset, +static int cx_debugbus_read(struct msm_gpu *gpu, void __iomem *cxdbg, u32 block, u32 offset, u32 *data) { - u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) | - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block); + u32 reg; + + if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) { + reg = A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) | + A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block); + } else { + reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) | + A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block); + } cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg); cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg); @@ -315,7 +329,8 @@ static void a6xx_get_debugbus_block(struct msm_gpu *gpu, ptr += debugbus_read(gpu, block->id, i, ptr); } -static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg, +static void a6xx_get_cx_debugbus_block(struct msm_gpu *gpu, + void __iomem *cxdbg, struct a6xx_gpu_state *a6xx_state, const struct 
a6xx_debugbus_block *block, struct a6xx_gpu_state_obj *obj) @@ -330,7 +345,7 @@ static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg, obj->handle = block; for (ptr = obj->data, i = 0; i < block->count; i++) - ptr += cx_debugbus_read(cxdbg, block->id, i, ptr); + ptr += cx_debugbus_read(gpu, cxdbg, block->id, i, ptr); } static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu, @@ -423,8 +438,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu, a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]], &a6xx_state->debugbus[i + debugbus_blocks_count]); } - } + a6xx_state->nr_debugbus = total_debugbus_blocks; + } } static void a6xx_get_debugbus(struct msm_gpu *gpu, @@ -526,7 +542,8 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, int i; for (i = 0; i < nr_cx_debugbus_blocks; i++) - a6xx_get_cx_debugbus_block(cxdbg, + a6xx_get_cx_debugbus_block(gpu, + cxdbg, a6xx_state, &cx_debugbus_blocks[i], &a6xx_state->cx_debugbus[i]); @@ -759,15 +776,15 @@ static void a7xx_get_cluster(struct msm_gpu *gpu, size_t datasize; int i, regcount = 0; - /* Some clusters need a selector register to be programmed too */ - if (cluster->sel) - in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val); - in += CRASHDUMP_WRITE(in, REG_A7XX_CP_APERTURE_CNTL_CD, A7XX_CP_APERTURE_CNTL_CD_PIPE(cluster->pipe_id) | A7XX_CP_APERTURE_CNTL_CD_CLUSTER(cluster->cluster_id) | A7XX_CP_APERTURE_CNTL_CD_CONTEXT(cluster->context_id)); + /* Some clusters need a selector register to be programmed too */ + if (cluster->sel) + in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val); + for (i = 0; cluster->regs[i] != UINT_MAX; i += 2) { int count = RANGE(cluster->regs, i); @@ -1569,8 +1586,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state), GFP_KERNEL); - bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & - A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT); + bool stalled; if (!a6xx_state) return ERR_PTR(-ENOMEM); @@ -1591,15 +1607,20 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) } /* If GX isn't on the rest of the data isn't going to be accessible */ - if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) + if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) return &a6xx_state->base; + /* Halt SQE first */ + gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); + /* Get the banks of indexed registers */ if (adreno_is_a7xx(adreno_gpu)) a7xx_get_indexed_registers(gpu, a6xx_state); else a6xx_get_indexed_registers(gpu, a6xx_state); + stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & + A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT); /* * Try to initialize the crashdumper, if we are not dumping state * with the SMMU stalled. 
The crashdumper needs memory access to @@ -1619,7 +1640,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) a7xx_get_clusters(gpu, a6xx_state, dumper); a7xx_get_dbgahb_clusters(gpu, a6xx_state, dumper); - msm_gem_kernel_put(dumper->bo, gpu->aspace); + msm_gem_kernel_put(dumper->bo, gpu->vm); } a7xx_get_post_crashdumper_registers(gpu, a6xx_state); @@ -1631,7 +1652,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) a6xx_get_clusters(gpu, a6xx_state, dumper); a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper); - msm_gem_kernel_put(dumper->bo, gpu->aspace); + msm_gem_kernel_put(dumper->bo, gpu->vm); } } @@ -1796,6 +1817,7 @@ static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj, print_name(p, " - type: ", a7xx_statetype_names[block->statetype]); print_name(p, " - pipe: ", a7xx_pipe_names[block->pipeid]); + drm_printf(p, " - location: %d\n", block->location); for (i = 0; i < block->num_sps; i++) { drm_printf(p, " - sp: %d\n", i); @@ -1873,6 +1895,7 @@ static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj, print_name(p, " - pipe: ", a7xx_pipe_names[dbgahb->pipe_id]); print_name(p, " - cluster-name: ", a7xx_cluster_names[dbgahb->cluster_id]); drm_printf(p, " - context: %d\n", dbgahb->context_id); + drm_printf(p, " - location: %d\n", dbgahb->location_id); a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4); } } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index e545106c70be..1c18499b60bb 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -212,7 +212,7 @@ static const struct a6xx_shader_block { SHADER(A6XX_SP_LB_5_DATA, 0x200), SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800), SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280), - SHADER(A6XX_SP_UAV_DATA, 0x80), + SHADER(A6XX_SP_GFX_UAV_BASE_DATA, 0x80), SHADER(A6XX_SP_INST_TAG, 0x80), SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80), SHADER(A6XX_SP_TMO_UMO_TAG, 0x80), @@ -419,47 +419,47 @@ static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = { REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL }, { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL }, - { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, + { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL }, - { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR, + { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR, REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size}, }; static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = { { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, - REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL }, + REG_A6XX_CP_SQE_STAT_DATA, 0x40, NULL }, { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL }, - { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, + { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL }, - { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR, - REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL }, - { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR, + { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR, + REG_A7XX_CP_BV_SQE_STAT_DATA, 0x40, NULL }, + { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR, REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL }, - { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR, + { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR, REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL }, - { "CP_SQE_AC_STAT_ADDR", 
REG_A7XX_CP_SQE_AC_STAT_ADDR, - REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL }, - { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR, + { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR, + REG_A7XX_CP_SQE_AC_STAT_DATA, 0x40, NULL }, + { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR, REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL }, - { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR, + { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR, REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL }, - { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR, + { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR, REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL }, - { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR, + { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR, REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size }, }; static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = { - "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, + "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR, REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL, }; static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = { - { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, - REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL }, - { "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR, - REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL }, + { "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR, + REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2200, NULL }, + { "CP_BV_MEM_POOL_DBG", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR, + REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2200, NULL }, }; #define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index 8e69b1e84657..550de6ad68ef 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -21,6 +21,7 @@ static const char * const a6xx_hfi_msg_id[] = { HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE), HFI_MSG_ID(HFI_H2F_MSG_TEST), HFI_MSG_ID(HFI_H2F_MSG_START), + HFI_MSG_ID(HFI_H2F_FEATURE_CTRL), HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START), HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE), HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER), @@ -765,23 +766,40 @@ send: NULL, 0); } +static int a6xx_hfi_feature_ctrl_msg(struct a6xx_gmu *gmu, u32 feature, u32 enable, u32 data) +{ + struct a6xx_hfi_msg_feature_ctrl msg = { + .feature = feature, + .enable = enable, + .data = data, + }; + + return a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0); +} + +#define HFI_FEATURE_IFPC 9 +#define IFPC_LONG_HYST 0x1680 + +static int a6xx_hfi_enable_ifpc(struct a6xx_gmu *gmu) +{ + if (gmu->idle_level != GMU_IDLE_STATE_IFPC) + return 0; + + return a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_IFPC, 1, IFPC_LONG_HYST); +} + #define HFI_FEATURE_ACD 12 static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu) { struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table; - struct a6xx_hfi_msg_feature_ctrl msg = { - .feature = HFI_FEATURE_ACD, - .enable = 1, - .data = 0, - }; int ret; if (!acd_table->enable_by_level) return 0; /* Enable ACD feature at GMU */ - ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0); + ret = a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_ACD, 1, 0); if (ret) { DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret); return ret; @@ -898,6 +916,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) if (ret) return ret; + ret = a6xx_hfi_enable_ifpc(gmu); + if (ret) + return ret; + ret = a6xx_hfi_send_core_fw_start(gmu); if (ret) return ret; diff --git 
a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c index 3b17fd2dba89..afc5f4aa3b17 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c @@ -41,7 +41,7 @@ static inline void set_preempt_state(struct a6xx_gpu *gpu, } /* Write the most recent wptr for the given ring into the hardware */ -static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +static inline void update_wptr(struct a6xx_gpu *a6xx_gpu, struct msm_ringbuffer *ring) { unsigned long flags; uint32_t wptr; @@ -51,7 +51,7 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) if (ring->restore_wptr) { wptr = get_wptr(ring); - gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false); ring->restore_wptr = false; } @@ -111,9 +111,9 @@ static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu) postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6); postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ); - postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO( + postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_LO( REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS); - postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0); + postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_HI(0); postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1); postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1); postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0); @@ -136,6 +136,21 @@ static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu) a6xx_gpu->postamble_enabled = false; } +/* + * Set preemption keepalive vote. Please note that this vote is different from the one used in + * a6xx_irq() + */ +static void a6xx_preempt_keepalive_vote(struct msm_gpu *gpu, bool on) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + if (adreno_has_gmu_wrapper(adreno_gpu)) + return; + + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_PWR_COL_PREEMPT_KEEPALIVE, on); +} + void a6xx_preempt_irq(struct msm_gpu *gpu) { uint32_t status; @@ -172,10 +187,12 @@ void a6xx_preempt_irq(struct msm_gpu *gpu) set_preempt_state(a6xx_gpu, PREEMPT_FINISH); - update_wptr(gpu, a6xx_gpu->cur_ring); + update_wptr(a6xx_gpu, a6xx_gpu->cur_ring); set_preempt_state(a6xx_gpu, PREEMPT_NONE); + a6xx_preempt_keepalive_vote(gpu, false); + trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id); /* @@ -210,7 +227,7 @@ void a6xx_preempt_hw_init(struct msm_gpu *gpu) gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0); /* Enable the GMEM save/restore feature for preemption */ - gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1); + gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 0x1); /* Reset the preemption state */ set_preempt_state(a6xx_gpu, PREEMPT_NONE); @@ -268,7 +285,7 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu) */ if (!ring || (a6xx_gpu->cur_ring == ring)) { set_preempt_state(a6xx_gpu, PREEMPT_FINISH); - update_wptr(gpu, a6xx_gpu->cur_ring); + update_wptr(a6xx_gpu, a6xx_gpu->cur_ring); set_preempt_state(a6xx_gpu, PREEMPT_NONE); spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); return; @@ -302,13 +319,16 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu) spin_unlock_irqrestore(&ring->preempt_lock, flags); - gpu_write64(gpu, - REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, - a6xx_gpu->preempt_smmu_iova[ring->id]); + /* Set the keepalive bit to keep the GPU ON until preemption is complete */ + a6xx_preempt_keepalive_vote(gpu, true); + + 
a6xx_fenced_write(a6xx_gpu, + REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, a6xx_gpu->preempt_smmu_iova[ring->id], + BIT(1), true); - gpu_write64(gpu, + a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR, - a6xx_gpu->preempt_iova[ring->id]); + a6xx_gpu->preempt_iova[ring->id], BIT(1), true); a6xx_gpu->next_ring = ring; @@ -328,7 +348,7 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu) set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED); /* Trigger the preemption */ - gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl); + a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl, BIT(1), false); } static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu, @@ -344,7 +364,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu, ptr = msm_gem_kernel_new(gpu->dev, PREEMPT_RECORD_SIZE(adreno_gpu), - MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova); + MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->vm, &bo, &iova); if (IS_ERR(ptr)) return PTR_ERR(ptr); @@ -362,7 +382,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu, ptr = msm_gem_kernel_new(gpu->dev, PREEMPT_SMMU_INFO_SIZE, MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY, - gpu->aspace, &bo, &iova); + gpu->vm, &bo, &iova); if (IS_ERR(ptr)) return PTR_ERR(ptr); @@ -377,7 +397,7 @@ struct a7xx_cp_smmu_info *smmu_info_ptr = ptr; - msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid); + msm_iommu_pagetable_params(to_msm_vm(gpu->vm)->mmu, &ttbr, &asid); smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC; smmu_info_ptr->ttbr0 = ttbr; @@ -405,7 +425,7 @@ void a6xx_preempt_fini(struct msm_gpu *gpu) int i; for (i = 0; i < gpu->nr_rings; i++) - msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace); + msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->vm); } void a6xx_preempt_init(struct msm_gpu *gpu) @@ -431,7 +451,7 @@ a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY, - gpu->aspace, &a6xx_gpu->preempt_postamble_bo, + gpu->vm, &a6xx_gpu->preempt_postamble_bo, &a6xx_gpu->preempt_postamble_iova); preempt_prepare_postamble(a6xx_gpu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 16e7ac444efd..28f744f3caf7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -16,10 +16,6 @@ bool snapshot_debugbus = false; MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)"); module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600); -bool allow_vram_carveout = false; -MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU"); -module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600); - int enable_preemption = -1; MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))"); module_param(enable_preemption, int, 0600); @@ -28,6 +24,10 @@ bool disable_acd; MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD"); module_param_unsafe(disable_acd, bool, 0400); +static bool skip_gpu; +MODULE_PARM_DESC(skip_gpu, "Disable GPU driver registration (0=enable GPU driver registration (default), 1=skip GPU driver registration)"); +module_param(skip_gpu, bool, 0400); + extern const struct adreno_gpulist a2xx_gpulist; extern const struct adreno_gpulist a3xx_gpulist; extern const struct adreno_gpulist a4xx_gpulist; @@ -188,6 +188,9 @@ 
bool adreno_has_gpu(struct device_node *node) uint32_t chip_id; int ret; + if (skip_gpu) + return false; + ret = find_chipid(node, &chip_id); if (ret) return false; @@ -264,42 +267,23 @@ static const struct component_ops a3xx_ops = { .unbind = adreno_unbind, }; -static void adreno_device_register_headless(void) -{ - /* on imx5, we don't have a top-level mdp/dpu node - * this creates a dummy node for the driver for that case - */ - struct platform_device_info dummy_info = { - .parent = NULL, - .name = "msm", - .id = -1, - .res = NULL, - .num_res = 0, - .data = NULL, - .size_data = 0, - .dma_mask = ~0, - }; - platform_device_register_full(&dummy_info); -} - static int adreno_probe(struct platform_device *pdev) { + if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon") || + msm_gpu_no_components()) + return msm_gpu_probe(pdev, &a3xx_ops); - int ret; - - ret = component_add(&pdev->dev, &a3xx_ops); - if (ret) - return ret; - - if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon")) - adreno_device_register_headless(); - - return 0; + return component_add(&pdev->dev, &a3xx_ops); } static void adreno_remove(struct platform_device *pdev) { - component_del(&pdev->dev, &a3xx_ops); + struct msm_drm_private *priv = platform_get_drvdata(pdev); + + if (priv->kms_init) + component_del(&pdev->dev, &a3xx_ops); + else + msm_gpu_remove(pdev, &a3xx_ops); } static void adreno_shutdown(struct platform_device *pdev) @@ -427,10 +411,16 @@ static struct platform_driver adreno_driver = { void __init adreno_register(void) { + if (skip_gpu) + return; + platform_driver_register(&adreno_driver); } void __exit adreno_unregister(void) { + if (skip_gpu) + return; + platform_driver_unregister(&adreno_driver); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h index cb66ece6606b..04b49d385f9d 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h @@ -81,7 +81,7 @@ static const u32 gen7_0_0_debugbus_blocks[] = { A7XX_DBGBUS_USPTP_7, }; -static struct gen7_shader_block gen7_0_0_shader_blocks[] = { +static const struct gen7_shader_block gen7_0_0_shader_blocks[] = { {A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, {A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP}, @@ -668,12 +668,19 @@ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8)); -/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */ -static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = { +/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */ +static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = { 0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c, 0x0b60f, 0x0b621, 0x0b630, 0x0b633, UINT_MAX, UINT_MAX, }; +static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8)); + +/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */ +static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = { + 0x0b600, 0x0b600, + UINT_MAX, UINT_MAX, +}; static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8)); /* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */ @@ -695,7 +702,7 @@ static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = { .val = 0x9, }; -static struct gen7_cluster_registers gen7_0_0_clusters[] = { 
+static const struct gen7_cluster_registers gen7_0_0_clusters[] = { { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, gen7_0_0_noncontext_pipe_br_registers, }, { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT, @@ -764,7 +771,7 @@ static struct gen7_cluster_registers gen7_0_0_clusters[] = { gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, }, }; -static struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = { +static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = { { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 }, { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, @@ -914,7 +921,7 @@ static const u32 gen7_0_0_dpm_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_0_0_dpm_registers), 8)); -static struct gen7_reg_list gen7_0_0_reg_list[] = { +static const struct gen7_reg_list gen7_0_0_reg_list[] = { { gen7_0_0_gpu_registers, NULL }, { gen7_0_0_cx_misc_registers, NULL }, { gen7_0_0_dpm_registers, NULL }, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h index 6f8ad50f32ce..772652eb61f3 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h @@ -95,7 +95,7 @@ static const u32 gen7_2_0_debugbus_blocks[] = { A7XX_DBGBUS_CCHE_2, }; -static struct gen7_shader_block gen7_2_0_shader_blocks[] = { +static const struct gen7_shader_block gen7_2_0_shader_blocks[] = { {A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, {A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP}, @@ -489,7 +489,7 @@ static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = { .val = 0x9, }; -static struct gen7_cluster_registers gen7_2_0_clusters[] = { +static const struct gen7_cluster_registers gen7_2_0_clusters[] = { { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, gen7_2_0_noncontext_pipe_br_registers, }, { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT, @@ -558,7 +558,7 @@ static struct gen7_cluster_registers gen7_2_0_clusters[] = { gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, }, }; -static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = { +static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = { { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 }, { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, @@ -573,6 +573,8 @@ static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = { gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 }, { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 }, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP, + gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 }, { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 }, { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, @@ -737,7 +739,7 @@ static const u32 gen7_2_0_dpm_registers[] = { }; static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_registers), 8)); -static struct gen7_reg_list gen7_2_0_reg_list[] = { +static const struct gen7_reg_list gen7_2_0_reg_list[] = { { gen7_2_0_gpu_registers, NULL }, { gen7_2_0_cx_misc_registers, NULL }, { 
gen7_2_0_dpm_registers, NULL }, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h index 9a327d543f27..0956dfca1f05 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h @@ -117,7 +117,7 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = { A7XX_DBGBUS_GBIF_CX, }; -static struct gen7_shader_block gen7_9_0_shader_blocks[] = { +static const struct gen7_shader_block gen7_9_0_shader_blocks[] = { { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, @@ -1116,7 +1116,7 @@ static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = { .val = 0x9, }; -static struct gen7_cluster_registers gen7_9_0_clusters[] = { +static const struct gen7_cluster_registers gen7_9_0_clusters[] = { { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, gen7_9_0_non_context_pipe_br_registers, }, { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT, @@ -1185,7 +1185,7 @@ static struct gen7_cluster_registers gen7_9_0_clusters[] = { gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, }, }; -static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = { +static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = { { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00}, { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, @@ -1294,34 +1294,34 @@ static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = { gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, }; -static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = { +static const struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = { { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, REG_A6XX_CP_SQE_STAT_DATA, 0x00040}, { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, REG_A6XX_CP_DRAW_STATE_DATA, 0x00200}, - { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR, + { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR, REG_A6XX_CP_ROQ_DBG_DATA, 0x00800}, - { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, + { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000}, - { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR, + { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR, REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200}, - { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR, + { "CP_BV_ROQ_DBG", REG_A7XX_CP_BV_ROQ_DBG_ADDR, REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800}, - { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR, + { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR, REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000}, - { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR, + { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR, REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040}, - { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR, - REG_A7XX_CP_RESOURCE_TBL_DBG_DATA, 0x04100}, - { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR, + { "CP_RESOURCE_TABLE_DBG", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR, + REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100}, + { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR, REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200}, - { "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR, + { "CP_LPAC_ROQ_DBG", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR, REG_A7XX_CP_LPAC_ROQ_DBG_DATA, 0x00200}, - { "CP_SQE_AC_UCODE_DBG_ADDR", 
REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR, + { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR, REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x08000}, - { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR, + { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR, REG_A7XX_CP_SQE_AC_STAT_DATA, 0x00040}, - { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR, + { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR, REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x00040}, { "CP_AQE_ROQ_0", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0, REG_A7XX_CP_AQE_ROQ_DBG_DATA_0, 0x00100}, @@ -1337,7 +1337,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = { REG_A7XX_CP_AQE_STAT_DATA_1, 0x00040}, }; -static struct gen7_reg_list gen7_9_0_reg_list[] = { +static const struct gen7_reg_list gen7_9_0_reg_list[] = { { gen7_9_0_gpu_registers, NULL}, { gen7_9_0_cx_misc_registers, NULL}, { gen7_9_0_cx_dbgc_registers, NULL}, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 86bff915c3e7..afaa3cfefd35 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -10,7 +10,7 @@ #include <linux/interconnect.h> #include <linux/firmware/qcom/qcom_scm.h> #include <linux/kernel.h> -#include <linux/of_address.h> +#include <linux/of_reserved_mem.h> #include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/soc/qcom/mdt_loader.h> @@ -33,7 +33,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, struct device *dev = &gpu->pdev->dev; const struct firmware *fw; const char *signed_fwname = NULL; - struct device_node *np, *mem_np; + struct device_node *np; struct resource r; phys_addr_t mem_phys; ssize_t mem_size; @@ -51,18 +51,11 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, return -ENODEV; } - mem_np = of_parse_phandle(np, "memory-region", 0); - of_node_put(np); - if (!mem_np) { + ret = of_reserved_mem_region_to_resource(np, 0, &r); + if (ret) { zap_available = false; - return -EINVAL; - } - - ret = of_address_to_resource(mem_np, 0, &r); - of_node_put(mem_np); - if (ret) return ret; - + } mem_phys = r.start; /* @@ -191,25 +184,25 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); } -struct msm_gem_address_space * -adreno_create_address_space(struct msm_gpu *gpu, - struct platform_device *pdev) +struct drm_gpuvm * +adreno_create_vm(struct msm_gpu *gpu, + struct platform_device *pdev) { - return adreno_iommu_create_address_space(gpu, pdev, 0); + return adreno_iommu_create_vm(gpu, pdev, 0); } -struct msm_gem_address_space * -adreno_iommu_create_address_space(struct msm_gpu *gpu, - struct platform_device *pdev, - unsigned long quirks) +struct drm_gpuvm * +adreno_iommu_create_vm(struct msm_gpu *gpu, + struct platform_device *pdev, + unsigned long quirks) { struct iommu_domain_geometry *geometry; struct msm_mmu *mmu; - struct msm_gem_address_space *aspace; + struct drm_gpuvm *vm; u64 start, size; mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks); - if (IS_ERR_OR_NULL(mmu)) + if (IS_ERR(mmu)) return ERR_CAST(mmu); geometry = msm_iommu_get_geometry(mmu); @@ -224,16 +217,16 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu, start = max_t(u64, SZ_16M, geometry->aperture_start); size = geometry->aperture_end - start + 1; - aspace = msm_gem_address_space_create(mmu, "gpu", - start & GENMASK_ULL(48, 0), size); + vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0), + size, true); - if (IS_ERR(aspace) && 
!IS_ERR(mmu)) + if (IS_ERR(vm) && !IS_ERR(mmu)) mmu->funcs->destroy(mmu); - return aspace; + return vm; } -u64 adreno_private_address_space_size(struct msm_gpu *gpu) +u64 adreno_private_vm_size(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); @@ -273,9 +266,11 @@ void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu) if (!priv->stall_enabled && ktime_after(ktime_get(), priv->stall_reenable_time) && !READ_ONCE(gpu->crashstate)) { + struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu; + priv->stall_enabled = true; - gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, true); + mmu->funcs->set_stall(mmu, true); } spin_unlock_irqrestore(&priv->fault_stall_lock, flags); } @@ -290,6 +285,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, u32 scratch[4]) { struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu; const char *type = "UNKNOWN"; bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) && !READ_ONCE(gpu->crashstate); @@ -303,8 +299,9 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, if (priv->stall_enabled) { priv->stall_enabled = false; - gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, false); + mmu->funcs->set_stall(mmu, false); } + priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500); spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags); @@ -351,11 +348,20 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, return 0; } -int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, +static bool +adreno_smmu_has_prr(struct msm_gpu *gpu) +{ + struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); + return adreno_smmu && adreno_smmu->set_prr_addr; +} + +int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, uint32_t param, uint64_t *value, uint32_t *len) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct drm_device *drm = gpu->dev; + /* Note ctx can be NULL when called from rd_open(): */ + struct drm_gpuvm *vm = ctx ? 
msm_context_vm(drm, ctx) : NULL; /* No pointer params yet */ if (*len != 0) @@ -401,8 +407,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, *value = 0; return 0; case MSM_PARAM_FAULTS: - if (ctx->aspace) - *value = gpu->global_faults + ctx->aspace->faults; + if (vm) + *value = gpu->global_faults + to_msm_vm(vm)->faults; else *value = gpu->global_faults; return 0; @@ -410,36 +416,39 @@ *value = gpu->suspend_count; return 0; case MSM_PARAM_VA_START: - if (ctx->aspace == gpu->aspace) + if (vm == gpu->vm) return UERR(EINVAL, drm, "requires per-process pgtables"); - *value = ctx->aspace->va_start; + *value = vm->mm_start; return 0; case MSM_PARAM_VA_SIZE: - if (ctx->aspace == gpu->aspace) + if (vm == gpu->vm) return UERR(EINVAL, drm, "requires per-process pgtables"); - *value = ctx->aspace->va_size; + *value = vm->mm_range; return 0; case MSM_PARAM_HIGHEST_BANK_BIT: - *value = adreno_gpu->ubwc_config.highest_bank_bit; + *value = adreno_gpu->ubwc_config->highest_bank_bit; return 0; case MSM_PARAM_RAYTRACING: *value = adreno_gpu->has_ray_tracing; return 0; case MSM_PARAM_UBWC_SWIZZLE: - *value = adreno_gpu->ubwc_config.ubwc_swizzle; + *value = adreno_gpu->ubwc_config->ubwc_swizzle; return 0; case MSM_PARAM_MACROTILE_MODE: - *value = adreno_gpu->ubwc_config.macrotile_mode; + *value = adreno_gpu->ubwc_config->macrotile_mode; return 0; case MSM_PARAM_UCHE_TRAP_BASE: *value = adreno_gpu->uche_trap_base; return 0; + case MSM_PARAM_HAS_PRR: + *value = adreno_smmu_has_prr(gpu); + return 0; default: return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param); } } -int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx, +int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx, uint32_t param, uint64_t value, uint32_t len) { struct drm_device *drm = gpu->dev; @@ -485,7 +494,22 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx, case MSM_PARAM_SYSPROF: if (!capable(CAP_SYS_ADMIN)) return UERR(EPERM, drm, "invalid permissions"); - return msm_file_private_set_sysprof(ctx, gpu, value); + return msm_context_set_sysprof(ctx, gpu, value); + case MSM_PARAM_EN_VM_BIND: + /* We can only support VM_BIND with per-process pgtables: */ + if (ctx->vm == gpu->vm) + return UERR(EINVAL, drm, "requires per-process pgtables"); + + /* + * We can only switch to VM_BIND mode if the VM has not yet + * been created: + */ + if (ctx->vm) + return UERR(EBUSY, drm, "VM already created"); + + ctx->userspace_managed_vm = value; + + return 0; default: return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param); } } @@ -607,7 +631,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, void *ptr; ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4, - MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, &bo, iova); if (IS_ERR(ptr)) return ERR_CAST(ptr); @@ -800,6 +824,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state) for (i = 0; state->bos && i < state->nr_bos; i++) kvfree(state->bos[i].data); + kfree(state->vm_logs); kfree(state->bos); kfree(state->comm); kfree(state->cmd); @@ -940,6 +965,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]); } + if (state->vm_logs) { + drm_puts(p, "vm-log:\n"); + for (i = 0; i < state->nr_vm_logs; i++) { + struct msm_gem_vm_log_entry *e = &state->vm_logs[i]; + drm_printf(p, " - %s:%d: 
0x%016llx-0x%016llx\n", + e->op, e->queue_id, e->iova, + e->iova + e->range); + } + } + drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status); drm_puts(p, "ringbuffer:\n"); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index bc063594a359..390fa6720d9b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -12,13 +12,14 @@ #include <linux/firmware.h> #include <linux/iopoll.h> +#include <linux/soc/qcom/ubwc.h> + #include "msm_gpu.h" #include "adreno_common.xml.h" #include "adreno_pm4.xml.h" extern bool snapshot_debugbus; -extern bool allow_vram_carveout; enum { ADRENO_FW_PM4 = 0, @@ -58,6 +59,7 @@ enum adreno_family { #define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4) #define ADRENO_QUIRK_PREEMPTION BIT(5) #define ADRENO_QUIRK_4GB_VA BIT(6) +#define ADRENO_QUIRK_IFPC BIT(7) /* Helper for formating the chip_id in the way that userspace tools like * crashdec expect. @@ -205,44 +207,12 @@ struct adreno_gpu { /* firmware: */ const struct firmware *fw[ADRENO_FW_MAX]; - struct { - /** - * @rgb565_predicator: Unknown, introduced with A650 family, - * related to UBWC mode/ver 4 - */ - u32 rgb565_predicator; - /** @uavflagprd_inv: Unknown, introduced with A650 family */ - u32 uavflagprd_inv; - /** @min_acc_len: Whether the minimum access length is 64 bits */ - u32 min_acc_len; - /** - * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling. - * - * UBWC 1.0 always enables all three levels. - * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3. - * UBWC 4.0 adds the optional ability to disable levels 2 & 3. - * - * This is a bitmask where BIT(0) enables level 1, BIT(1) - * controls level 2, and BIT(2) enables level 3. - */ - u32 ubwc_swizzle; - /** - * @highest_bank_bit: Highest Bank Bit - * - * The Highest Bank Bit value represents the bit of the highest - * DDR bank. This should ideally use DRAM type detection. - */ - u32 highest_bank_bit; - u32 amsbc; - /** - * @macrotile_mode: Macrotile Mode - * - * Whether to use 4-channel macrotiling mode or the newer - * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is - * 4-channel and 1 is 8-channel. - */ - u32 macrotile_mode; - } ubwc_config; + /* + * The migration to the central UBWC config db is still in flight - keep + * a copy containing some local fixups until that's done. + */ + const struct qcom_ubwc_cfg_data *ubwc_config; + struct qcom_ubwc_cfg_data _ubwc_config; /* * Register offsets are different between some GPUs. 
@@ -580,10 +550,10 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu) /* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */ #define ADRENO_VM_START 0x100000000ULL -u64 adreno_private_address_space_size(struct msm_gpu *gpu); -int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, +u64 adreno_private_vm_size(struct msm_gpu *gpu); +int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, uint32_t param, uint64_t *value, uint32_t *len); -int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx, +int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx, uint32_t param, uint64_t value, uint32_t len); const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname); @@ -623,14 +593,14 @@ void adreno_show_object(struct drm_printer *p, void **ptr, int len, * Common helper function to initialize the default address space for arm-smmu * attached targets */ -struct msm_gem_address_space * -adreno_create_address_space(struct msm_gpu *gpu, - struct platform_device *pdev); - -struct msm_gem_address_space * -adreno_iommu_create_address_space(struct msm_gpu *gpu, - struct platform_device *pdev, - unsigned long quirks); +struct drm_gpuvm * +adreno_create_vm(struct msm_gpu *gpu, + struct platform_device *pdev); + +struct drm_gpuvm * +adreno_iommu_create_vm(struct msm_gpu *gpu, + struct platform_device *pdev, + unsigned long quirks); int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, struct adreno_smmu_fault_info *info, const char *block, |