Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 134
1 file changed, 120 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index d502e30f67d9..56fdbe626d30 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -112,6 +112,22 @@
 #define mmCP_HYP_ME_UCODE_DATA		0x5817
 #define mmCP_HYP_ME_UCODE_DATA_BASE_IDX	1
 
+//CC_GC_SA_UNIT_DISABLE
+#define mmCC_GC_SA_UNIT_DISABLE			0x0fe9
+#define mmCC_GC_SA_UNIT_DISABLE_BASE_IDX	0
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT	0x8
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK		0x0000FF00L
+//GC_USER_SA_UNIT_DISABLE
+#define mmGC_USER_SA_UNIT_DISABLE		0x0fea
+#define mmGC_USER_SA_UNIT_DISABLE_BASE_IDX	0
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT	0x8
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK	0x0000FF00L
+//PA_SC_ENHANCE_3
+#define mmPA_SC_ENHANCE_3			0x1085
+#define mmPA_SC_ENHANCE_3_BASE_IDX		0
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT	0x3
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK		0x00000008L
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3091,6 +3107,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x10f80988),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
@@ -3188,6 +3205,8 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
+static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
+static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -3560,7 +3579,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
 		break;
 	}
 
-	if (adev->gfx.cp_fw_write_wait == false)
+	if (!adev->gfx.cp_fw_write_wait)
 		DRM_WARN_ONCE("CP firmware version too old, please update!");
 }
 
@@ -3586,6 +3605,17 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
 			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
 }
 
+static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
+{
+	const struct rlc_firmware_header_v2_2 *rlc_hdr;
+
+	rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+	adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+	adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+	adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+}
+
 static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
 {
 	bool ret = false;
@@ -3610,6 +3640,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
 		if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 		break;
+	case CHIP_NAVY_FLOUNDER:
+		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+		break;
 	default:
 		break;
 	}
@@ -3698,8 +3731,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-	if (version_major == 2 && version_minor == 1)
-		adev->gfx.rlc.is_rlc_v2_1 = true;
 
 	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
 	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -3741,8 +3772,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 		for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
 			adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
 
-		if (adev->gfx.rlc.is_rlc_v2_1)
-			gfx_v10_0_init_rlc_ext_microcode(adev);
+		if (version_major == 2) {
+			if (version_minor >= 1)
+				gfx_v10_0_init_rlc_ext_microcode(adev);
+			if (version_minor == 2)
+				gfx_v10_0_init_rlc_iram_dram_microcode(adev);
+		}
 	}
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
@@ -3803,8 +3838,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 			adev->firmware.fw_size +=
 				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 		}
-		if (adev->gfx.rlc.is_rlc_v2_1 &&
-		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+		if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
 		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
 		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
@@ -3824,6 +3858,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 			info->fw = adev->gfx.rlc_fw;
 			adev->firmware.fw_size +=
 				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+
+			if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
+			    adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+				info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+				info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+				info->fw = adev->gfx.rlc_fw;
+				adev->firmware.fw_size +=
+					ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+
+				info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+				info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+				info->fw = adev->gfx.rlc_fw;
+				adev->firmware.fw_size +=
+					ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+			}
 		}
 
 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
@@ -4533,12 +4582,17 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
 	int i, j;
 	u32 data;
 	u32 active_rbs = 0;
+	u32 bitmap;
 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
 					adev->gfx.config.max_sh_per_se;
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			bitmap = i * adev->gfx.config.max_sh_per_se + j;
+			if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+			    ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
+				continue;
 			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
 			data = gfx_v10_0_get_rb_active_bitmap(adev);
 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
@@ -6947,6 +7001,9 @@ static int gfx_v10_0_hw_init(void *handle)
 	if (r)
 		return r;
 
+	if (adev->asic_type == CHIP_SIENNA_CICHLID)
+		gfx_v10_3_program_pbb_mode(adev);
+
 	return r;
 }
 
@@ -6980,15 +7037,19 @@ static int gfx_v10_0_hw_fini(void *handle)
 
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+	if (!adev->in_pci_err_recovery) {
 #ifndef BRING_UP_DEBUG
-	if (amdgpu_async_gfx_ring) {
-		r = gfx_v10_0_kiq_disable_kgq(adev);
-		if (r)
-			DRM_ERROR("KGQ disable failed\n");
-	}
+		if (amdgpu_async_gfx_ring) {
+			r = gfx_v10_0_kiq_disable_kgq(adev);
+			if (r)
+				DRM_ERROR("KGQ disable failed\n");
+		}
 #endif
-	if (amdgpu_gfx_disable_kcq(adev))
-		DRM_ERROR("KCQ disable failed\n");
+		if (amdgpu_gfx_disable_kcq(adev))
+			DRM_ERROR("KCQ disable failed\n");
+	}
+
 	if (amdgpu_sriov_vf(adev)) {
 		gfx_v10_0_cp_gfx_enable(adev, false);
 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
@@ -8756,6 +8817,10 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			bitmap = i * adev->gfx.config.max_sh_per_se + j;
+			if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+			    ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
+				continue;
 			mask = 1;
 			ao_bitmap = 0;
 			counter = 0;
@@ -8790,6 +8855,47 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
 	return 0;
 }
 
+static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev)
+{
+	uint32_t efuse_setting, vbios_setting, disabled_sa, max_sa_mask;
+
+	efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
+	efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
+	efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
+
+	vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE);
+	vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK;
+	vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
+
+	max_sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
+						adev->gfx.config.max_shader_engines);
+	disabled_sa = efuse_setting | vbios_setting;
+	disabled_sa &= max_sa_mask;
+
+	return disabled_sa;
+}
+
+static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
+{
+	uint32_t max_sa_per_se, max_sa_per_se_mask, max_shader_engines;
+	uint32_t disabled_sa_mask, se_index, disabled_sa_per_se;
+
+	disabled_sa_mask = gfx_v10_3_get_disabled_sa(adev);
+
+	max_sa_per_se = adev->gfx.config.max_sh_per_se;
+	max_sa_per_se_mask = (1 << max_sa_per_se) - 1;
+	max_shader_engines = adev->gfx.config.max_shader_engines;
+
+	for (se_index = 0; max_shader_engines > se_index; se_index++) {
+		disabled_sa_per_se = disabled_sa_mask >> (se_index * max_sa_per_se);
+		disabled_sa_per_se &= max_sa_per_se_mask;
+		if (disabled_sa_per_se == max_sa_per_se_mask) {
+			WREG32_FIELD15(GC, 0, PA_SC_ENHANCE_3, FORCE_PBB_WORKLOAD_MODE_TO_ZERO, 1);
+			break;
+		}
+	}
+}
+
 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
 {
 	.type = AMD_IP_BLOCK_TYPE_GFX,