Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gpu.c')
-rw-r--r-- | drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 655 |
1 files changed, 593 insertions, 62 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index d4e85e24002f..7a0220d29a23 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -103,6 +103,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 		struct msm_ringbuffer *ring, struct msm_file_private *ctx)
 {
 	bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	phys_addr_t ttbr;
 	u32 asid;
 	u64 memptr = rbmemptr(ring, ttbr0);
@@ -114,9 +115,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 		return;
 
 	if (!sysprof) {
-		/* Turn off protected mode to write to special registers */
-		OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
-		OUT_RING(ring, 0);
+		if (!adreno_is_a7xx(adreno_gpu)) {
+			/* Turn off protected mode to write to special registers */
+			OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+			OUT_RING(ring, 0);
+		}
 
 		OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
 		OUT_RING(ring, 1);
@@ -142,6 +145,16 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
 
 	/*
+	 * Sync both threads after switching pagetables and enable BR only
+	 * to make sure BV doesn't race ahead while BR is still switching
+	 * pagetables.
+	 */
+	if (adreno_is_a7xx(&a6xx_gpu->base)) {
+		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+		OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
+	}
+
+	/*
 	 * And finally, trigger a uche flush to be sure there isn't anything
 	 * lingering in that part of the GPU
 	 */
@@ -163,9 +176,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 		OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
 		OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
 
-		/* Re-enable protected mode: */
-		OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
-		OUT_RING(ring, 1);
+		if (!adreno_is_a7xx(adreno_gpu)) {
+			/* Re-enable protected mode: */
+			OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+			OUT_RING(ring, 1);
+		}
 	}
 }
 
@@ -252,6 +267,133 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	a6xx_flush(gpu, ring);
 }
 
+static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = submit->ring;
+	unsigned int i, ibs = 0;
+
+	/*
+	 * Toggle concurrent binning for pagetable switch and set the thread to
+	 * BR since only it can execute the pagetable switch packets.
+	 */
+	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+	OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
+
+	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+
+	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+		rbmemptr_stats(ring, index, cpcycles_start));
+	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+		rbmemptr_stats(ring, index, alwayson_start));
+
+	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+	OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+	OUT_PKT7(ring, CP_SET_MARKER, 1);
+	OUT_RING(ring, 0x101); /* IFPC disable */
+
+	OUT_PKT7(ring, CP_SET_MARKER, 1);
+	OUT_RING(ring, 0x00d); /* IB1LIST start */
+
+	/* Submit the commands */
+	for (i = 0; i < submit->nr_cmds; i++) {
+		switch (submit->cmd[i].type) {
+		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+			break;
+		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+				break;
+			fallthrough;
+		case MSM_SUBMIT_CMD_BUF:
+			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+			OUT_RING(ring, submit->cmd[i].size);
+			ibs++;
+			break;
+		}
+
+		/*
+		 * Periodically update shadow-wptr if needed, so that we
+		 * can see partial progress of submits with large # of
+		 * cmds.. otherwise we could needlessly stall waiting for
+		 * ringbuffer state, simply due to looking at a shadow
+		 * rptr value that has not been updated
+		 */
+		if ((ibs % 32) == 0)
+			update_shadow_rptr(gpu, ring);
+	}
+
+	OUT_PKT7(ring, CP_SET_MARKER, 1);
+	OUT_RING(ring, 0x00e); /* IB1LIST end */
+
+	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+		rbmemptr_stats(ring, index, cpcycles_end));
+	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+		rbmemptr_stats(ring, index, alwayson_end));
+
+	/* Write the fence to the scratch register */
+	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+	OUT_RING(ring, submit->seqno);
+
+	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+	OUT_RING(ring, CP_SET_THREAD_BR);
+
+	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+	OUT_RING(ring, CCU_INVALIDATE_DEPTH);
+
+	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+	OUT_RING(ring, CCU_INVALIDATE_COLOR);
+
+	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+	OUT_RING(ring, CP_SET_THREAD_BV);
+
+	/*
+	 * Make sure the timestamp is committed once BV pipe is
+	 * completely done with this submission.
+	 */
+	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+	OUT_RING(ring, CACHE_CLEAN | BIT(27));
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
+	OUT_RING(ring, submit->seqno);
+
+	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+	OUT_RING(ring, CP_SET_THREAD_BR);
+
+	/*
+	 * This makes sure that BR doesn't race ahead and commit
+	 * timestamp to memstore while BV is still processing
+	 * this submission.
+	 */
+	OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
+	OUT_RING(ring, submit->seqno);
+
+	/* write the ringbuffer timestamp */
+	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+	OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+	OUT_RING(ring, submit->seqno);
+
+	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+	OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+	OUT_PKT7(ring, CP_SET_MARKER, 1);
+	OUT_RING(ring, 0x100); /* IFPC enable */
+
+	trace_msm_gpu_submit_flush(submit,
+		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+
+	a6xx_flush(gpu, ring);
+}
+
 const struct adreno_reglist a612_hwcg[] = {
 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
@@ -695,6 +837,121 @@ const struct adreno_reglist a690_hwcg[] = {
 	{}
 };
 
+const struct adreno_reglist a730_hwcg[] = {
+	{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022222 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
+	{ REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
+	{ REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
+	{ REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
+	{ REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
+	{ REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
+	{ REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222 },
+	{ REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000223 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
+	{ REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
+	{ REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
+	{ REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
+	{},
+};
+
+const struct adreno_reglist a740_hwcg[] = {
+	{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x22022222 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x003cf3cf },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
+	{ REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
+	{ REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
+	{ REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x00222222 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000444 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
+	{ REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
+	{ REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
+	{ REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00222222 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
+	{ REG_A7XX_RBBM_CLOCK_HYST2_VFD, 0x00000000 },
+	{ REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000222 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
+	{ REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
+	{ REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
+	{ REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
+	{ REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
+	{ REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
+	{ REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
+	{},
+};
+
 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -702,7 +959,7 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 	const struct adreno_reglist *reg;
 	unsigned int i;
-	u32 val, clock_cntl_on;
+	u32 val, clock_cntl_on, cgc_mode;
 
 	if (!adreno_gpu->info->hwcg)
 		return;
@@ -714,6 +971,17 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 	else
 		clock_cntl_on = 0x8aa8aa82;
 
+	if (adreno_is_a7xx(adreno_gpu)) {
+		cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
+
+		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+			  state ? cgc_mode : 0);
+		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+			  state ? 0x10111 : 0);
+		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+			  state ? 0x5555 : 0);
+	}
+
 	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
 
 	/* Don't re-program the registers if they are already correct */
@@ -721,14 +989,14 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 		return;
 
 	/* Disable SP clock before programming HWCG registers */
-	if (!adreno_is_a610(adreno_gpu))
+	if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
 		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
 
 	for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
 		gpu_write(gpu, reg->offset, state ? reg->value : 0);
 
 	/* Enable SP clock */
-	if (!adreno_is_a610(adreno_gpu))
+	if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
 		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
 
 	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
@@ -897,6 +1165,59 @@ static const u32 a690_protect[] = {
 	A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /*note: infiite range */
 };
 
+static const u32 a730_protect[] = {
+	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+	A6XX_PROTECT_RDONLY(0x0050b, 0x0058),
+	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+	A6XX_PROTECT_RDONLY(0x005fb, 0x009d),
+	A6XX_PROTECT_NORDWR(0x00699, 0x01e9),
+	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+	/* 0x008d0-0x008dd are unprotected on purpose for tools like perfetto */
+	A6XX_PROTECT_RDONLY(0x008de, 0x0154),
+	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+	A6XX_PROTECT_NORDWR(0x0098d, 0x00b2),
+	A6XX_PROTECT_NORDWR(0x00a41, 0x01be),
+	A6XX_PROTECT_NORDWR(0x00df0, 0x0001),
+	A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00e07, 0x0008),
+	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+	A6XX_PROTECT_NORDWR(0x08e80, 0x0280),
+	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+	A6XX_PROTECT_NORDWR(0x09e40, 0x0000),
+	A6XX_PROTECT_NORDWR(0x09e64, 0x000d),
+	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0ae50, 0x000f),
+	A6XX_PROTECT_NORDWR(0x0ae66, 0x0003),
+	A6XX_PROTECT_NORDWR(0x0ae6f, 0x0003),
+	A6XX_PROTECT_NORDWR(0x0b604, 0x0003),
+	A6XX_PROTECT_NORDWR(0x0ec00, 0x0fff),
+	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x18400, 0x0053),
+	A6XX_PROTECT_RDONLY(0x18454, 0x0004),
+	A6XX_PROTECT_NORDWR(0x18459, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1a459, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1c459, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+	A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+	A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
+	A6XX_PROTECT_NORDWR(0x1f878, 0x002a),
+	/* CP_PROTECT_REG[44, 46] are left untouched! */
+	0,
+	0,
+	0,
+	A6XX_PROTECT_NORDWR(0x1f8c0, 0x00000),
+};
+
 static void a6xx_set_cp_protect(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -918,6 +1239,11 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
 		count = ARRAY_SIZE(a660_protect);
 		count_max = 48;
 		BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
+	} else if (adreno_is_a730(adreno_gpu) || adreno_is_a740(adreno_gpu)) {
+		regs = a730_protect;
+		count = ARRAY_SIZE(a730_protect);
+		count_max = 48;
+		BUILD_BUG_ON(ARRAY_SIZE(a730_protect) > 48);
 	} else {
 		regs = a6xx_protect;
 		count = ARRAY_SIZE(a6xx_protect);
@@ -984,7 +1310,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 	if (adreno_is_a640_family(adreno_gpu))
 		amsbc = 1;
 
-	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
+	if (adreno_is_a650(adreno_gpu) ||
+	    adreno_is_a660(adreno_gpu) ||
+	    adreno_is_a730(adreno_gpu) ||
+	    adreno_is_a740_family(adreno_gpu)) {
 		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
 		hbb_lo = 3;
 		amsbc = 1;
@@ -1017,6 +1346,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 		  uavflagprd_inv << 4 | min_acc_len << 3 |
 		  hbb_lo << 1 | ubwc_mode);
 
+	if (adreno_is_a7xx(adreno_gpu))
+		gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
+			  FIELD_PREP(GENMASK(8, 5), hbb_lo));
+
 	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
 		  min_acc_len << 23 | hbb_lo << 21);
 }
@@ -1049,6 +1382,55 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
 	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
+static int a7xx_cp_init(struct msm_gpu *gpu)
+{
+	struct msm_ringbuffer *ring = gpu->rb[0];
+	u32 mask;
+
+	/* Disable concurrent binning before sending CP init */
+	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+	OUT_RING(ring, BIT(27));
+
+	OUT_PKT7(ring, CP_ME_INIT, 7);
+
+	/* Use multiple HW contexts */
+	mask = BIT(0);
+
+	/* Enable error detection */
+	mask |= BIT(1);
+
+	/* Set default reset state */
+	mask |= BIT(3);
+
+	/* Disable save/restore of performance counters across preemption */
+	mask |= BIT(6);
+
+	/* Enable the register init list with the spinlock */
+	mask |= BIT(8);
+
+	OUT_RING(ring, mask);
+
+	/* Enable multiple hardware contexts */
+	OUT_RING(ring, 0x00000003);
+
+	/* Enable error detection */
+	OUT_RING(ring, 0x20000000);
+
+	/* Operation mode mask */
+	OUT_RING(ring, 0x00000002);
+
+	/* *Don't* send a power up reg list for concurrent binning (TODO) */
+	/* Lo address */
+	OUT_RING(ring, 0x00000000);
+	/* Hi address */
+	OUT_RING(ring, 0x00000000);
+	/* BIT(31) set => read the regs from the list */
+	OUT_RING(ring, 0x00000000);
+
+	a6xx_flush(gpu, ring);
+	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
 /*
  * Check that the microcode version is new enough to include several key
  * security fixes. Return true if the ucode is safe.
@@ -1065,6 +1447,10 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 	if (IS_ERR(buf))
 		return false;
 
+	/* A7xx is safe! */
+	if (adreno_is_a7xx(adreno_gpu))
+		return true;
+
 	/*
 	 * Targets up to a640 (a618, a630 and a640) need to check for a
 	 * microcode version that is patched to support the whereami opcode or
@@ -1181,22 +1567,46 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
 }
 
 #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
-	A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
-	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
-	A6XX_RBBM_INT_0_MASK_CP_IB2 | \
-	A6XX_RBBM_INT_0_MASK_CP_IB1 | \
-	A6XX_RBBM_INT_0_MASK_CP_RB | \
-	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
-	A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
-	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
-	A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
-	A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+		       A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+		       A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+		       A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+		       A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+		       A6XX_RBBM_INT_0_MASK_CP_RB | \
+		       A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+		       A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+		       A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+		       A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+		       A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+#define A7XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+		       A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+		       A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
+		       A6XX_RBBM_INT_0_MASK_CP_SW | \
+		       A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+		       A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
+		       A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
+		       A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+		       A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+		       A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+		       A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+		       A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
+		       A6XX_RBBM_INT_0_MASK_TSBWRITEERROR)
+
+#define A7XX_APRIV_MASK (A6XX_CP_APRIV_CNTL_ICACHE | \
+			 A6XX_CP_APRIV_CNTL_RBFETCH | \
+			 A6XX_CP_APRIV_CNTL_RBPRIVLEVEL | \
+			 A6XX_CP_APRIV_CNTL_RBRPWB)
+
+#define A7XX_BR_APRIVMASK (A7XX_APRIV_MASK | \
+			   A6XX_CP_APRIV_CNTL_CDREAD | \
+			   A6XX_CP_APRIV_CNTL_CDWRITE)
 
 static int hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	u64 gmem_range_min;
 	int ret;
 
 	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
@@ -1219,6 +1629,10 @@ static int hw_init(struct msm_gpu *gpu)
 		mb();
 	}
 
+	/* Some GPUs are stubborn and take their sweet time to unhalt GBIF! */
+	if (adreno_is_a7xx(adreno_gpu) && a6xx_has_gbif(adreno_gpu))
+		spin_until(!gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK));
+
 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
 
 	if (adreno_is_a619_holi(adreno_gpu))
@@ -1232,19 +1646,21 @@ static int hw_init(struct msm_gpu *gpu)
 	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
 
-	/* Turn on 64 bit addressing for all blocks */
-	gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
-	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+	if (!adreno_is_a7xx(adreno_gpu)) {
+		/* Turn on 64 bit addressing for all blocks */
+		gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
+		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+	}
 
 	/* enable hardware clockgating */
 	a6xx_set_hwcg(gpu, true);
@@ -1252,12 +1668,14 @@ static int hw_init(struct msm_gpu *gpu)
 	/* VBIF/GBIF start*/
 	if (adreno_is_a610(adreno_gpu) ||
 	    adreno_is_a640_family(adreno_gpu) ||
-	    adreno_is_a650_family(adreno_gpu)) {
+	    adreno_is_a650_family(adreno_gpu) ||
+	    adreno_is_a7xx(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
-		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
+			  adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3);
 	} else {
 		gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
 	}
@@ -1265,24 +1683,39 @@ static int hw_init(struct msm_gpu *gpu)
 	if (adreno_is_a630(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
 
+	if (adreno_is_a7xx(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0);
+
 	/* Make all blocks contribute to the GPU BUSY perf counter */
 	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
 
 	/* Disable L2 bypass in the UCHE */
-	gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
-	gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
-	gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+	if (adreno_is_a7xx(adreno_gpu)) {
+		gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
+		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+	} else {
+		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
+		gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
+		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+	}
+
+	if (!(adreno_is_a650_family(adreno_gpu) ||
+	      adreno_is_a730(adreno_gpu))) {
+		gmem_range_min = adreno_is_a740_family(adreno_gpu) ? SZ_16M : SZ_1M;
 
-	if (!adreno_is_a650_family(adreno_gpu)) {
 		/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
-		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, 0x00100000);
+		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min);
 
 		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
-			0x00100000 + adreno_gpu->info->gmem - 1);
+			gmem_range_min + adreno_gpu->info->gmem - 1);
 	}
 
-	gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
-	gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+	if (adreno_is_a7xx(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23));
+	else {
+		gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+		gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+	}
 
 	if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
@@ -1290,7 +1723,7 @@ static int hw_init(struct msm_gpu *gpu)
 	} else if (adreno_is_a610(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
-	} else {
+	} else if (!adreno_is_a7xx(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
 	}
@@ -1302,7 +1735,7 @@ static int hw_init(struct msm_gpu *gpu)
 	if (adreno_is_a610(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
 		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
-	} else
+	} else if (!adreno_is_a7xx(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
 
 	/* Setting the primFifo thresholds default values,
@@ -1318,7 +1751,7 @@ static int hw_init(struct msm_gpu *gpu)
 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
 	else if (adreno_is_a610(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
-	else
+	else if (!adreno_is_a7xx(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
 
 	/* Set the AHB default slave response to "ERROR" */
@@ -1327,13 +1760,22 @@ static int hw_init(struct msm_gpu *gpu)
 	/* Turn on performance counters */
 	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
 
+	if (adreno_is_a7xx(adreno_gpu)) {
+		/* Turn on the IFPC counter (countable 4 on XOCLK4) */
+		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
+			  FIELD_PREP(GENMASK(7, 0), 0x4));
+	}
+
 	/* Select CP0 to always count cycles */
 	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
 
 	a6xx_set_ubwc_config(gpu);
 
 	/* Enable fault detection */
-	if (adreno_is_a619(adreno_gpu))
+	if (adreno_is_a730(adreno_gpu) ||
+	    adreno_is_a740_family(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
+	else if (adreno_is_a619(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
 	else if (adreno_is_a610(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
@@ -1373,15 +1815,31 @@ static int hw_init(struct msm_gpu *gpu)
 	/* Set dualQ + disable afull for A660 GPU */
 	if (adreno_is_a660(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+	else if (adreno_is_a7xx(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
+			  FIELD_PREP(GENMASK(19, 16), 6) |
+			  FIELD_PREP(GENMASK(15, 12), 6) |
+			  FIELD_PREP(GENMASK(11, 8), 9) |
+			  BIT(3) | BIT(2) |
+			  FIELD_PREP(GENMASK(1, 0), 2));
 
 	/* Enable expanded apriv for targets that support it */
 	if (gpu->hw_apriv) {
-		gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
-			  (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+		if (adreno_is_a7xx(adreno_gpu)) {
+			gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+				  A7XX_BR_APRIVMASK);
+			gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL,
+				  A7XX_APRIV_MASK);
+			gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL,
+				  A7XX_APRIV_MASK);
+		} else
+			gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+				  BIT(6) | BIT(5) | BIT(3) | BIT(2) | BIT(1));
 	}
 
 	/* Enable interrupts */
-	gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+	gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK,
+		  adreno_is_a7xx(adreno_gpu) ? A7XX_INT_MASK : A6XX_INT_MASK);
 
 	ret = adreno_hw_init(gpu);
 	if (ret)
@@ -1408,6 +1866,12 @@ static int hw_init(struct msm_gpu *gpu)
 			shadowptr(a6xx_gpu, gpu->rb[0]));
 	}
 
+	/* ..which means "always" on A7xx, also for BV shadow */
+	if (adreno_is_a7xx(adreno_gpu)) {
+		gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR,
+			    rbmemptr(gpu->rb[0], bv_fence));
+	}
+
 	/* Always come up on rb 0 */
 	a6xx_gpu->cur_ring = gpu->rb[0];
 
@@ -1416,7 +1880,7 @@ static int hw_init(struct msm_gpu *gpu)
 	/* Enable the SQE_to start the CP engine */
 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
 
-	ret = a6xx_cp_init(gpu);
+	ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu);
 	if (ret)
 		goto out;
 
@@ -1653,7 +2117,7 @@ static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
 			(val & 0x3ffff), val);
 	}
 
-	if (status & A6XX_CP_INT_CP_AHB_ERROR)
+	if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu)))
 		dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
 
 	if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
@@ -1803,6 +2267,35 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 	gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
 }
 
+static void a7xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+{
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (IS_ERR(a6xx_gpu->llc_mmio))
+		return;
+
+	if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
+		u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+
+		gpu_scid &= GENMASK(4, 0);
+
+		gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1,
+			  FIELD_PREP(GENMASK(29, 25), gpu_scid) |
+			  FIELD_PREP(GENMASK(24, 20), gpu_scid) |
+			  FIELD_PREP(GENMASK(19, 15), gpu_scid) |
+			  FIELD_PREP(GENMASK(14, 10), gpu_scid) |
+			  FIELD_PREP(GENMASK(9, 5), gpu_scid) |
+			  FIELD_PREP(GENMASK(4, 0), gpu_scid));
+
+		gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0,
+			  FIELD_PREP(GENMASK(14, 10), gpu_scid) |
+			  BIT(8));
+	}
+
+	llcc_slice_activate(a6xx_gpu->htw_llc_slice);
+}
+
 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
 {
 	/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
@@ -1814,7 +2307,7 @@ static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
 }
 
 static void a6xx_llc_slices_init(struct platform_device *pdev,
-		struct a6xx_gpu *a6xx_gpu)
+		struct a6xx_gpu *a6xx_gpu, bool is_a7xx)
 {
 	struct device_node *phandle;
 
@@ -1823,18 +2316,18 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
 		return;
 
 	/*
-	 * There is a different programming path for targets with an mmu500
-	 * attached, so detect if that is the case
+	 * There is a different programming path for A6xx targets with an
+	 * mmu500 attached, so detect if that is the case
 	 */
 	phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
 	a6xx_gpu->have_mmu500 = (phandle &&
		of_device_is_compatible(phandle, "arm,mmu-500"));
 	of_node_put(phandle);
 
-	if (a6xx_gpu->have_mmu500)
-		a6xx_gpu->llc_mmio = NULL;
-	else
+	if (is_a7xx || !a6xx_gpu->have_mmu500)
 		a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
+	else
+		a6xx_gpu->llc_mmio = NULL;
 
 	a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
 	a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
@@ -1920,7 +2413,7 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
 
 	msm_devfreq_resume(gpu);
 
-	a6xx_llc_activate(a6xx_gpu);
+	adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
 
 	return ret;
 }
@@ -2237,7 +2730,7 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i
 		DRM_DEV_ERROR(dev,
			      "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
			      speedbin);
-		return UINT_MAX;
+		supp_hw = BIT(0); /* Default */
 	}
 
 	ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
@@ -2307,6 +2800,37 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
 	.get_timestamp = a6xx_get_timestamp,
 };
 
+static const struct adreno_gpu_funcs funcs_a7xx = {
+	.base = {
+		.get_param = adreno_get_param,
+		.set_param = adreno_set_param,
+		.hw_init = a6xx_hw_init,
+		.ucode_load = a6xx_ucode_load,
+		.pm_suspend = a6xx_gmu_pm_suspend,
+		.pm_resume = a6xx_gmu_pm_resume,
+		.recover = a6xx_recover,
+		.submit = a7xx_submit,
+		.active_ring = a6xx_active_ring,
+		.irq = a6xx_irq,
+		.destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+		.show = a6xx_show,
+#endif
+		.gpu_busy = a6xx_gpu_busy,
+		.gpu_get_freq = a6xx_gmu_get_freq,
+		.gpu_set_freq = a6xx_gpu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+		.gpu_state_get = a6xx_gpu_state_get,
+		.gpu_state_put = a6xx_gpu_state_put,
+#endif
+		.create_address_space = a6xx_create_address_space,
+		.create_private_address_space = a6xx_create_private_address_space,
+		.get_rptr = a6xx_get_rptr,
+		.progress = a6xx_progress,
+	},
+	.get_timestamp = a6xx_gmu_get_timestamp,
+};
+
 struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -2316,6 +2840,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	struct a6xx_gpu *a6xx_gpu;
 	struct adreno_gpu *adreno_gpu;
 	struct msm_gpu *gpu;
+	bool is_a7xx;
 	int ret;
 
 	a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
@@ -2339,7 +2864,11 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	adreno_gpu->base.hw_apriv =
 		!!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
 
-	a6xx_llc_slices_init(pdev, a6xx_gpu);
+	/* gpu->info only gets assigned in adreno_gpu_init() */
+	is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
+		  config->info->family == ADRENO_7XX_GEN2;
+
+	a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
 
 	ret = a6xx_set_supported_hw(&pdev->dev, config->info);
 	if (ret) {
@@ -2347,7 +2876,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 		return ERR_PTR(ret);
 	}
 
-	if (adreno_has_gmu_wrapper(adreno_gpu))
+	if (is_a7xx)
+		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
+	else if (adreno_has_gmu_wrapper(adreno_gpu))
 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
 	else
 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);