Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 75
1 file changed, 39 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index fe7c48f2fb2a..2523221a2519 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -513,7 +513,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
if (!ring->is_mes_queue)
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
if (!ring->is_mes_queue)
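Note: this hunk tracks an API cleanup in which amdgpu_ib_free() drops its device argument. A minimal sketch of the signature change, assuming the IB's suballocation already carries the device it needs:

    /* old: the caller passed the device explicitly */
    void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                        struct dma_fence *f);

    /* new: the adev parameter is redundant and has been dropped */
    void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);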
@@ -537,6 +537,7 @@ static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *
int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -566,6 +567,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
@@ -573,6 +575,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
@@ -581,6 +584,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
@@ -593,6 +597,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
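Note: the five firmware requests above all gain the same new argument. A hedged sketch of the extended amdgpu_ucode_request() contract, assuming a required/optional enum was introduced alongside this change; AMDGPU_UCODE_REQUIRED turns a missing blob into a hard probe failure, whereas an optional blob would let init continue without it:

    enum amdgpu_ucode_required {
        AMDGPU_UCODE_OPTIONAL,
        AMDGPU_UCODE_REQUIRED,
    };

    int amdgpu_ucode_request(struct amdgpu_device *adev,
                             const struct firmware **fw,
                             enum amdgpu_ucode_required required,
                             const char *fmt, ...);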
@@ -1347,6 +1352,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if (adev->gfx.me_fw_version >= 2480 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2680 &&
+ adev->mes.fw_version[0] >= 100)
+ adev->gfx.enable_cleaner_shader = true;
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
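Note: the cleaner shader is enabled on GC 12.0.0/12.0.1 only when all four firmware components are new enough. The gate could be factored into a predicate; gfx_v12_0_fw_has_cleaner_shader() below is a hypothetical helper mirroring the checks above, not code from the patch:

    /* hypothetical helper, not in the driver */
    static bool gfx_v12_0_fw_has_cleaner_shader(struct amdgpu_device *adev)
    {
        return adev->gfx.me_fw_version >= 2480 &&
               adev->gfx.pfp_fw_version >= 2530 &&
               adev->gfx.mec_fw_version >= 2680 &&
               adev->mes.fw_version[0] >= 100;
    }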
@@ -1437,11 +1450,19 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if ((adev->gfx.me_fw_version >= 2660) &&
+ (adev->gfx.mec_fw_version >= 2920)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
+ }
if (!adev->enable_mes_kiq) {
r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
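Note: with the TODO resolved, AMDGPU_RESET_TYPE_PER_QUEUE is advertised only when the ME/MEC firmware supports it. Callers are then expected to test the advertised mask rather than firmware versions; a minimal caller-side sketch:

    /* sketch: gate on the mask, not on raw fw versions */
    if (adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
        dev_dbg(adev->dev, "per-queue compute reset available\n");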
@@ -1610,6 +1631,7 @@ static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1627,12 +1649,14 @@ static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
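Note: this is the substantive fix in the patch. The per-SA RB mask was hard-coded to 0x3 (two RBs), and OR-ing in the global bitmap could mark harvested RBs as active; the new code builds the mask from the real per-SA RB width and ANDs against the harvest-derived bitmap. A worked example, assuming amdgpu_gfx_create_bitmask(w) expands to a mask of w set bits:

    /* assumed config: 2 shader arrays, 4 RBs each, one RB harvested */
    u32 width  = 4;                       /* rb_bitmap_width_per_sa */
    u32 per_sa = (1u << width) - 1;       /* 0xf, not the old 0x3 */
    u32 active = per_sa | (per_sa << 4);  /* 0xff across both SAs */
    u32 global = 0x7f;                    /* harvesting fused one RB off */
    u32 rbs    = active & global;         /* 0x7f; the old OR would have
                                             re-enabled the fused RB */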
@@ -2832,9 +2856,7 @@ static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev)
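Note: the KIQ scheduler setup above now sets the enable bit in the same write as the queue select. The layout below is inferred from the shifts in the code, not from a register specification:

    /* inferred RLC_CP_SCHEDULERS low byte: bits 0-2 queue, 3-4 pipe,
     * 5-6 me, bit 7 (0x80) enable; one register write instead of two */
    WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);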
@@ -3864,10 +3886,10 @@ static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
}
#endif
-static int gfx_v12_0_set_powergating_state(void *handle,
+static int gfx_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
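Note: this follows the tree-wide rework of the amd_ip_funcs callbacks from an opaque void *handle to the IP-block pointer. A minimal sketch of the new callback shape; example_pg_update() is a hypothetical stand-in for the actual gating logic:

    static int example_set_powergating_state(struct amdgpu_ip_block *ip_block,
                                             enum amd_powergating_state state)
    {
        struct amdgpu_device *adev = ip_block->adev;  /* no cast needed */
        bool enable = (state == AMD_PG_STATE_GATE);

        return example_pg_update(adev, enable);       /* hypothetical */
    }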
@@ -3999,17 +4021,6 @@ static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
-
- data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
- data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
-
- /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
- if (adev->sdma.num_instances > 1) {
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
- }
}
}
@@ -4115,15 +4126,15 @@ static int gfx_v12_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v12_0_set_clockgating_state(void *handle,
+static int gfx_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
gfx_v12_0_update_gfx_clock_gating(adev,
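Note: besides the same callback-signature rework, the open-coded adev->ip_versions[GC_HWIP][0] lookup becomes the amdgpu_ip_version() accessor. A sketch of what the accessor is assumed to do, namely mask off variant/sub-revision bits so comparisons against IP_VERSION(12, 0, x) behave consistently:

    /* assumed shape of the accessor */
    static inline uint32_t example_ip_version(struct amdgpu_device *adev,
                                              uint8_t hwip, uint8_t inst)
    {
        return adev->ip_versions[hwip][inst] & ~0xffU;
    }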
@@ -5233,24 +5244,16 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int r, i;
+ int r;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- mutex_lock(&adev->srbm_mutex);
- soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
- WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
- break;
- udelay(1);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
+ if (r) {
+ dev_err(adev->dev, "reset via MMIO failed %d\n", r);
+ return r;
}
- soc24_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0)) {