Diffstat (limited to 'drivers/gpu/drm/amd')
53 files changed, 568 insertions, 312 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index aeeec211861c..fd6e83795873 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1092,16 +1092,20 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) * S0ix even though the system is suspending to idle, so return false * in that case. */ - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - dev_warn_once(adev->dev, + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { + dev_err_once(adev->dev, "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); + return false; + } #if !IS_ENABLED(CONFIG_AMD_PMC) - dev_warn_once(adev->dev, + dev_err_once(adev->dev, "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); -#endif /* CONFIG_AMD_PMC */ + return false; +#else return true; +#endif /* CONFIG_AMD_PMC */ } #endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 981a9cfb63b5..5c7d40873ee2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3757,6 +3757,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); + /* APUs with gfx9 onwards don't rely on PCIe atomics; atomics are instead + * natively supported over an internal path, so set have_atomics_support to true. + */ + else if ((adev->flags & AMD_IS_APU) && + (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) + adev->have_atomics_support = true; else adev->have_atomics_support = !pci_enable_atomic_ops_to_root(adev->pdev, @@ -4506,7 +4512,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) dev_info(adev->dev, "recover vram bo from shadow start\n"); mutex_lock(&adev->shadow_list_lock); list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { - shadow = &vmbo->bo; + /* If the vm is a compute context or the adev is an APU, the shadow will be NULL */ + if (!vmbo->shadow) + continue; + shadow = vmbo->shadow; + /* No need to recover an evicted BO */ if (shadow->tbo.resource->mem_type != TTM_PL_TT || shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index f52d0ba91a77..a7d250809da9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -582,7 +582,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) if (r) amdgpu_fence_driver_force_completion(ring); - if (ring->fence_drv.irq_src) + if (!drm_dev_is_unplugged(adev_to_drm(adev)) && + ring->fence_drv.irq_src) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9d3a0542c996..f3f541ba0aca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -687,9 +687,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; - r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); - if (r) - goto late_fini; + if (adev->gfx.cp_ecc_error_irq.funcs) { + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); 
+ if (r) + goto late_fini; + } } else { amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 4e2531758866..95b0f984acbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -593,6 +593,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(9, 3, 0): /* GC 10.3.7 */ case IP_VERSION(10, 3, 7): + /* GC 11.0.1 */ + case IP_VERSION(11, 0, 1): if (amdgpu_tmz == 0) { adev->gmc.tmz_enabled = false; dev_info(adev->dev, @@ -616,7 +618,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(10, 3, 1): /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): - case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): /* Don't enable it by default yet. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index b07c000fc8ba..4fa019c8aefc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -241,6 +241,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, return 0; } +int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r, i; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); + if (r) + goto late_fini; + } + } + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + return r; +} + int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) { int err; @@ -262,7 +287,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) adev->jpeg.ras_if = &ras->ras_block.ras_comm; if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 0ca76f0f23e9..1471a1ebb034 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -38,6 +38,7 @@ struct amdgpu_jpeg_reg{ struct amdgpu_jpeg_inst { struct amdgpu_ring ring_dec; struct amdgpu_irq_src irq; + struct amdgpu_irq_src ras_poison_irq; struct amdgpu_jpeg_reg external; }; @@ -72,6 +73,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block); int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); #endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 2bd1a54ee866..3b225be89cb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); + struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; struct amdgpu_bo_vm *vmbo; + bo = shadow_bo->parent; vmbo = to_amdgpu_bo_vm(bo); /* in case amdgpu_device_recover_vram 
got NULL of bo->parent */ if (!list_empty(&vmbo->shadow_list)) { @@ -694,11 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, return r; *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); - INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); - /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list - * is initialized. - */ - bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy; return r; } @@ -715,6 +711,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) mutex_lock(&adev->shadow_list_lock); list_add_tail(&vmbo->shadow_list, &adev->shadow_list); + vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); + vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; mutex_unlock(&adev->shadow_list_lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e63fcc58e8e0..2d94f1b63bd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1181,6 +1181,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, return 0; } +int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r, i; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0); + if (r) + goto late_fini; + } + } + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + return r; +} + int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) { int err; @@ -1202,7 +1227,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) adev->vcn.ras_if = &ras->ras_block.ras_comm; if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index c730949ece7d..f1397ef66fd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -234,6 +234,7 @@ struct amdgpu_vcn_inst { struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; atomic_t sched_score; struct amdgpu_irq_src irq; + struct amdgpu_irq_src ras_poison_irq; struct amdgpu_vcn_reg external; struct amdgpu_bo *dpg_sram_bo; struct dpg_pause_state pause_state; @@ -400,6 +401,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block); int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index df63dc3bca18..051c7194ab4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); amdgpu_bo_add_to_shadow_list(*vmbo); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 43d6a9d6a538..afacfb9b5bf6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -800,7 +800,7 @@ static void amdgpu_vram_mgr_debug(struct 
ttm_resource_manager *man, { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; + struct amdgpu_vram_reservation *rsv; drm_printf(printer, " vis usage:%llu\n", amdgpu_vram_mgr_vis_usage(mgr)); @@ -812,8 +812,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, drm_buddy_print(mm, printer); drm_printf(printer, "reserved:\n"); - list_for_each_entry(block, &mgr->reserved_pages, link) - drm_buddy_block_print(mm, block, printer); + list_for_each_entry(rsv, &mgr->reserved_pages, blocks) + drm_printf(printer, "%#018llx-%#018llx: %llu\n", + rsv->start, rsv->start + rsv->size, rsv->size); mutex_unlock(&mgr->lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f5b5ce1051a2..ab44c1391d52 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6892,8 +6892,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(ring->mqd_obj); return r; + } gfx_v10_0_kiq_init_queue(ring); amdgpu_bo_kunmap(ring->mqd_obj); @@ -8152,8 +8154,14 @@ static int gfx_v10_0_set_powergating_state(void *handle, case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 6): case IP_VERSION(10, 3, 7): + if (!enable) + amdgpu_gfx_off_ctrl(adev, false); + gfx_v10_cntl_pg(adev, enable); - amdgpu_gfx_off_ctrl(adev, enable); + + if (enable) + amdgpu_gfx_off_ctrl(adev, true); + break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index a9da0486467a..c4940b6ea1c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1315,13 +1315,6 @@ static int gfx_v11_0_sw_init(void *handle) if (r) return r; - /* ECC error */ - r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, - GFX_11_0_0__SRCID__CP_ECC_ERROR, - &adev->gfx.cp_ecc_error_irq); - if (r) - return r; - /* FED error */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT, @@ -4444,7 +4437,6 @@ static int gfx_v11_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4675,24 +4667,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) uint64_t clock; uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); if (amdgpu_sriov_vf(adev)) { + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); if (clock_counter_hi_pre != clock_counter_hi_after) clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); } else { + preempt_disable(); clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); if 
(clock_counter_hi_pre != clock_counter_hi_after) clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + preempt_enable(); } clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); + return clock; } @@ -5158,8 +5153,14 @@ static int gfx_v11_0_set_powergating_state(void *handle, break; case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + if (!enable) + amdgpu_gfx_off_ctrl(adev, false); + gfx_v11_cntl_pg(adev, enable); - amdgpu_gfx_off_ctrl(adev, enable); + + if (enable) + amdgpu_gfx_off_ctrl(adev, true); + break; default: break; @@ -5897,36 +5898,6 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev } } -#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1 -#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \ - do { \ - uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \ - tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \ - WREG32_SOC15_IP(GC, reg_addr, tmp); \ - } while (0) - -static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - uint32_t ecc_irq_state = 0; - uint32_t pipe0_int_cntl_addr = 0; - int i = 0; - - ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0; - - pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); - - WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state); - - for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) - SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL, - ecc_irq_state); - - return 0; -} - static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -6341,11 +6312,6 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { .process = gfx_v11_0_priv_inst_irq, }; -static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = { - .set = gfx_v11_0_set_cp_ecc_error_state, - .process = amdgpu_gfx_cp_ecc_error_irq, -}; - static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = { .process = gfx_v11_0_rlc_gc_fed_irq, }; @@ -6361,9 +6327,6 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; - adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */ - adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs; - adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */ adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index adbcd8127c82..e7f2b7bf0ff5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -149,16 +149,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin"); #define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 -#define mmGOLDEN_TSC_COUNT_UPPER_Raven 0x007a -#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX 0 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven 0x007b -#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX 0 - -#define mmGOLDEN_TSC_COUNT_UPPER_Raven2 0x0068 -#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX 0 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven2 0x0069 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX 0 - enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 
0, @@ -3617,8 +3607,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(ring->mqd_obj); return r; + } gfx_v9_0_kiq_init_queue(ring); amdgpu_bo_kunmap(ring->mqd_obj); @@ -3764,7 +3756,8 @@ static int gfx_v9_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4001,36 +3994,6 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) preempt_enable(); clock = clock_lo | (clock_hi << 32ULL); break; - case IP_VERSION(9, 1, 0): - preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ - if (hi_check != clock_hi) { - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - clock_hi = hi_check; - } - preempt_enable(); - clock = clock_lo | (clock_hi << 32ULL); - break; - case IP_VERSION(9, 2, 2): - preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. 
- */ - if (hi_check != clock_hi) { - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - clock_hi = hi_check; - } - preempt_enable(); - clock = clock_lo | (clock_hi << 32ULL); - break; default: amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index d95f9fe8f1c5..4116c112e8a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -31,6 +31,8 @@ #include "umc_v8_10.h" #include "athub/athub_3_0_0_sh_mask.h" #include "athub/athub_3_0_0_offset.h" +#include "dcn/dcn_3_2_0_offset.h" +#include "dcn/dcn_3_2_0_sh_mask.h" #include "oss/osssys_6_0_0_offset.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "navi10_enum.h" @@ -546,7 +548,24 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) { - return 0; + u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = AMDGPU_VBIOS_VGA_ALLOCATION; + } else { + u32 viewport; + u32 pitch; + + viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); + pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * + 4); + } + + return size; } static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index b040f51d9aa9..73e0dc5a10cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle) /* JPEG DJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], - VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq); + VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); if (r) return r; /* JPEG EJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], - VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq); + VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); if (r) return r; } @@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle) if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); } return 0; @@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__JPEG_DECODE: amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec); break; - case VCN_2_6__SRCID_DJPEG0_POISON: - case VCN_2_6__SRCID_EJPEG0_POISON: - amdgpu_jpeg_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -725,6 +732,11 @@ 
static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = { .process = jpeg_v2_5_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = { + .set = jpeg_v2_6_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) adev->jpeg.inst[i].irq.num_types = 1; adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; + + adev->jpeg.inst[i].ras_poison_irq.num_types = 1; + adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs; } } @@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = { static struct amdgpu_jpeg_ras jpeg_v2_6_ras = { .ras_block = { .hw_ops = &jpeg_v2_6_ras_hw_ops, + .ras_late_init = amdgpu_jpeg_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index c55e09432e26..1c2292cc5f2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle) switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 1, 1): + case IP_VERSION(3, 1, 2): break; default: harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 77e1e64aa1d1..a3d83c9f2c9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle) /* JPEG DJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq); + VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); if (r) return r; /* JPEG EJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq); + VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); if (r) return r; @@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); return 0; } @@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, case VCN_4_0__SRCID__JPEG_DECODE: amdgpu_fence_process(&adev->jpeg.inst->ring_dec); break; - case VCN_4_0__SRCID_DJPEG0_POISON: - case VCN_4_0__SRCID_EJPEG0_POISON: - amdgpu_jpeg_process_poison_irq(adev, source, entry); - break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { .process = jpeg_v4_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = { + .set = jpeg_v4_0_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void 
jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev) { adev->jpeg.inst->irq.num_types = 1; adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs; + + adev->jpeg.inst->ras_poison_irq.num_types = 1; + adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs; } const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = { @@ -811,6 +824,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = { static struct amdgpu_jpeg_ras jpeg_v4_0_ras = { .ras_block = { .hw_ops = &jpeg_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_jpeg_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 98c826f1f89b..0fb6013441f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -98,6 +98,16 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = }; /* Sienna Cichlid */ +static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs sc_video_codecs_encode = { + .codec_count = ARRAY_SIZE(sc_video_codecs_encode_array), + .codec_array = sc_video_codecs_encode_array, +}; + static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, @@ -136,8 +146,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = /* SRIOV Sienna Cichlid, not const since data is controlled by host */ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, }; static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = @@ -237,12 +247,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, } else { if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &sc_video_codecs_decode_vcn1; } else { if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &sc_video_codecs_decode_vcn0; } @@ -251,14 +261,14 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, case IP_VERSION(3, 0, 16): case IP_VERSION(3, 0, 2): if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &sc_video_codecs_decode_vcn0; return 0; case IP_VERSION(3, 1, 1): case IP_VERSION(3, 1, 2): if (encode) - *codecs = &nv_video_codecs_encode; + *codecs = &sc_video_codecs_encode; else *codecs = &yc_video_codecs_decode; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index e1b7fca09666..5f10883da6a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -57,7 +57,13 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) if (err) return err; - return psp_init_ta_microcode(psp, ucode_prefix); + err = psp_init_ta_microcode(psp, ucode_prefix); + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) && + (adev->pdev->revision == 0xa1) && + 
(psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) { + adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0; + } + return err; } static int psp_v10_0_ring_create(struct psp_context *psp, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index b3cc04dd8653..9295ac7edd56 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1917,9 +1917,11 @@ static int sdma_v4_0_hw_fini(void *handle) return 0; } - for (i = 0; i < adev->sdma.num_instances; i++) { - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); + } } sdma_v4_0_ctx_switch_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 6d15d5cd9e07..a2fd1ffadb59 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -301,10 +301,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) return 10000; + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) + return reference_clock / 4; return reference_clock; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 744be2a05623..d77162536514 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -711,7 +711,7 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_JPEG; - adev->external_rev_id = adev->rev_id + 0x1; + adev->external_rev_id = adev->rev_id + 0x80; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index ab0b45d0ead1..515681c88dcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle) /* VCN POISON TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], - VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq); + VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq); if (r) return r; } @@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle) (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, i, mmUVD_STATUS))) vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); } return 0; @@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: 
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); break; - case VCN_2_6__SRCID_UVD_POISON: - amdgpu_vcn_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = { .process = vcn_v2_5_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = { + .set = vcn_v2_6_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) continue; adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; + + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs; } } @@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = { static struct amdgpu_vcn_ras vcn_v2_6_ras = { .ras_block = { .hw_ops = &vcn_v2_6_ras_hw_ops, + .ras_late_init = amdgpu_vcn_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index bf0674039598..e5fd1e00914d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -139,7 +139,7 @@ static int vcn_v4_0_sw_init(void *handle) /* VCN POISON TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], - VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq); + VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq); if (r) return r; @@ -305,8 +305,8 @@ static int vcn_v4_0_hw_fini(void *handle) vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); } } - - amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); } return 0; @@ -1976,6 +1976,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp } /** + * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state + * + * @adev: amdgpu_device pointer + * @source: interrupt sources + * @type: interrupt types + * @state: interrupt states + * + * Set VCN block RAS interrupt state + */ +static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +/** * vcn_v4_0_process_interrupt - process VCN block interrupt * * @adev: amdgpu_device pointer @@ -2007,9 +2025,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_ case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); break; - case VCN_4_0__SRCID_UVD_POISON: - amdgpu_vcn_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2024,6 +2039,11 @@ static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = { .process = vcn_v4_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = { + .set = vcn_v4_0_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + /** * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions * @@ -2041,6 +2061,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 
1; adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs; + + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs; } } @@ -2114,6 +2137,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = { static struct amdgpu_vcn_ras vcn_v4_0_ras = { .ras_block = { .hw_ops = &vcn_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_vcn_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 531f173ade2d..c0360dbebd4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; u32 tmp; - if (adev->flags & AMD_IS_APU) - return reference_clock; + if (adev->flags & AMD_IS_APU) { + switch (adev->asic_type) { + case CHIP_STONEY: + /* vbios says 48Mhz, but the actual freq is 100Mhz */ + return 10000; + default: + return reference_clock; + } + } tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8b4b186c57f5..d5cec03eaa8d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2479,20 +2479,25 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, if (acrtc && state->stream_status[i].plane_count != 0) { irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; - DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", - acrtc->crtc_id, enable ? "en" : "dis", rc); if (rc) DRM_WARN("Failed to %s pflip interrupts\n", enable ? "enable" : "disable"); if (enable) { - rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base); - if (rc) - DRM_WARN("Failed to enable vblank interrupts\n"); - } else { - amdgpu_dm_crtc_disable_vblank(&acrtc->base); - } + if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) + rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); + } else + rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); + if (rc) + DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); + + irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + /* During gpu-reset we disable and then enable vblank irq, so + * don't use amdgpu_irq_get/put() to avoid refcount change. + */ + if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); } } @@ -2852,7 +2857,7 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->dc_link->type == dc_connection_mst_branch) + if (aconnector && aconnector->mst_root) continue; mutex_lock(&aconnector->hpd_lock); @@ -6737,7 +6742,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, int clock, bpp = 0; bool is_y420 = false; - if (!aconnector->mst_output_port || !aconnector->dc_sink) + if (!aconnector->mst_output_port) return 0; mst_port = aconnector->mst_output_port; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index e3762e806617..440fc0869a34 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -146,7 +146,6 @@ static void vblank_control_worker(struct work_struct *work) static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) { - enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); @@ -169,18 +168,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (rc) return rc; - if (amdgpu_in_reset(adev)) { - irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; - /* During gpu-reset we disable and then enable vblank irq, so - * don't use amdgpu_irq_get/put() to avoid refcount change. - */ - if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) - rc = -EBUSY; - } else { - rc = (enable) - ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) - : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); - } + rc = (enable) + ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) + : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); if (rc) return rc; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 52564b93f7eb..7cde67b7f0c3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1981,6 +1981,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c return result; } +static bool commit_minimal_transition_state(struct dc *dc, + struct dc_state *transition_base_context); + /** * dc_commit_streams - Commit current stream state * @@ -2002,6 +2005,8 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_state *context; enum dc_status res = DC_OK; struct dc_validation_set set[MAX_STREAMS] = {0}; + struct pipe_ctx *pipe; + bool handle_exit_odm2to1 = false; if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) return res; @@ -2026,6 +2031,22 @@ enum dc_status dc_commit_streams(struct dc *dc, } } + /* Check for case where we are going from odm 2:1 to max + * pipe scenario. 
For these cases, we will call + * commit_minimal_transition_state() to exit odm 2:1 + * first, before processing the new streams + */ + if (stream_count == dc->res_pool->pipe_count) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->next_odm_pipe) + handle_exit_odm2to1 = true; + } + } + + if (handle_exit_odm2to1) + res = commit_minimal_transition_state(dc, dc->current_state); + context = dc_create_state(dc); if (!context) goto context_alloc_fail; @@ -3872,6 +3893,7 @@ static bool commit_minimal_transition_state(struct dc *dc, unsigned int i, j; unsigned int pipe_in_use = 0; bool subvp_in_use = false; + bool odm_in_use = false; if (!transition_context) return false; @@ -3900,6 +3922,18 @@ static bool commit_minimal_transition_state(struct dc *dc, } } + /* If ODM is enabled and we are adding or removing planes from any ODM + * pipe, we must use the minimal transition. + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->next_odm_pipe) { + odm_in_use = true; + break; + } + } + /* When the OS adds a new surface while all of the pipes are in use by the odm combine * and mpc split features, commit_minimal_transition_state is needed to transition safely. * After the OS exits MPO, it will go back to using odm and mpc split with all of the pipes, so we need * to call it again; otherwise return true to skip. * Reduce the scenarios that use dc_commit_state_no_check in the flip stage, especially * entering/exiting MPO while DCN still has enough resources. */ - if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) { + if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) { dc_release_state(transition_context); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 117d80cb36fb..fe1551393b26 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1446,6 +1446,26 @@ static int acquire_first_split_pipe( split_pipe->stream = stream; return i; + } else if (split_pipe->prev_odm_pipe && + split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) { + split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe; + if (split_pipe->next_odm_pipe) + split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe; + + if (split_pipe->prev_odm_pipe->plane_state) + resource_build_scaling_params(split_pipe->prev_odm_pipe); + + memset(split_pipe, 0, sizeof(*split_pipe)); + split_pipe->stream_res.tg = pool->timing_generators[i]; + split_pipe->plane_res.hubp = pool->hubps[i]; + split_pipe->plane_res.ipp = pool->ipps[i]; + split_pipe->plane_res.dpp = pool->dpps[i]; + split_pipe->stream_res.opp = pool->opps[i]; + split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; + split_pipe->pipe_idx = i; + + split_pipe->stream = stream; + return i; } } return -1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 422fbf79da64..5403e9399a46 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2113,15 +2113,6 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); - if 
(context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - dc_dmub_srv_p_state_delegate(dc, - true, context); - context->bw_ctx.bw.dcn.clk.p_state_change_support = true; - dc->clk_mgr->clks.fw_based_mclk_switching = true; - } else { - dc->clk_mgr->clks.fw_based_mclk_switching = false; - } - dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 8263a07f265f..32121db2851e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -983,36 +983,13 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { - bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; - /* Any transition into an FPO config should disable MCLK switching first to avoid - * driver and FW P-State synchronization issues. - */ - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { - dc->optimized_required = true; - context->bw_ctx.bw.dcn.clk.p_state_change_support = false; - } - if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - /* - * enabled -> enabled: do not disable - * enabled -> disabled: disable - * disabled -> enabled: don't care - * disabled -> disabled: don't care - */ - if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) - dc_dmub_srv_p_state_delegate(dc, false, context); - - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { - /* After disabling P-State, restore the original value to ensure we get the correct P-State - * on the next optimize. */ - context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c index 40c488b26901..cc3fe9cac5b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c @@ -423,3 +423,68 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool PERF_TRACE(); } +static void apply_symclk_on_tx_off_wa(struct dc_link *link) +{ + /* There are use cases where SYMCLK is referenced by OTG. For instance, + * for a TMDS signal the OTG relies on SYMCLK even if the TX video output is off. + * However, the current link interface powers off the PHY when disabling the link + * output, which turns off the SYMCLK generated by the PHY. The workaround is + * to identify the case where SYMCLK is still in use by the OTG when we + * power off the PHY. When this is detected, we temporarily power the PHY + * back on and move the PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling the + * program_pix_clk interface. When the OTG is disabled, we then power + * off the PHY by calling disable link output again. 
+ * + * In future dcn generations, we plan to rework transmitter control + * interface so that we could have an option to set SYMCLK ON TX OFF + * state in one step without this workaround + */ + + struct dc *dc = link->ctx->dc; + struct pipe_ctx *pipe_ctx = NULL; + uint8_t i; + + if (link->phy_state.symclk_ref_cnts.otg > 0) { + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { + pipe_ctx->clock_source->funcs->program_pix_clk( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + dc->link_srv->dp_get_encoding_format( + &pipe_ctx->link_config.dp_link_settings), + &pipe_ctx->pll_settings); + link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; + break; + } + } + } +} + +void dcn314_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct dmcu *dmcu = dc->res_pool->dmcu; + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + + link_hwss->disable_link_output(link, link_res, signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + /* + * Add the logic to extract BOTH power up and power down sequences + * from enable/disable link output and only call edp panel control + * in enable_link_dp and disable_link_dp once. + */ + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->unlock_phy(dmcu); + dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + + apply_symclk_on_tx_off_wa(link); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h index c786d5e6a428..6d0b62503caa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h @@ -45,4 +45,6 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); +void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal); + #endif /* __DC_HWSS_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c index 5267e901a35c..a588f46b166f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c @@ -105,7 +105,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, + .disable_link_output = dcn314_disable_link_output, .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 47beb4ea779d..0c4c3208def1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -138,7 +138,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { 
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_sdp_bw_after_urgent = 100.0, + .pct_ideal_sdp_bw_after_urgent = 90.0, .pct_ideal_fabric_bw_after_urgent = 67.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 13c7e7394b1c..d75248b6cae9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -810,7 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SwathHeightY[k], v->SwathHeightC[k], TWait, - v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ? + (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ || + v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->DSTXAfterScaler[k], @@ -3310,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->swath_width_chroma_ub_this_state[k], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->TWait, - v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ? + (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index 500b3dd6052d..d98e36a9a09c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -53,6 +53,7 @@ #define BPP_BLENDED_PIPE 0xffffffff #define MEM_STROBE_FREQ_MHZ 1600 +#define MIN_DCFCLK_FREQ_MHZ 200 #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0 struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index d4b7da526f0a..e8b2fc4002a5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -359,5 +359,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un link[i] = stream[i].link; bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing); } + + ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 300e156b924f..078aaaa53162 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -36,6 +36,8 @@ #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) +#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) + int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1460,15 +1462,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) { - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - struct smu_context *smu = 
adev->powerplay.pp_handle; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = adev->powerplay.pp_handle; - if ((is_support_sw_smu(adev) && smu->od_enabled) || - (is_support_sw_smu(adev) && smu->is_apu) || - (!is_support_sw_smu(adev) && hwmgr->od_enabled)) - return true; + return (smu->od_enabled || smu->is_apu); + } else { + struct pp_hwmgr *hwmgr; - return false; + /* + * dpm on some legacy asics don't carry od_enabled member + * as its pp_handle is casted directly from adev. + */ + if (amdgpu_dpm_is_legacy_dpm(adev)) + return false; + + hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; + + return hwmgr->od_enabled; + } } int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 58c2246918fd..f4f40459f22b 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -871,13 +871,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, } if (ret == -ENOENT) { size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); - if (size > 0) { - size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size); - } + size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size); } if (size == 0) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index d6d9e3b1b2c0..02e69ccff3ba 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) return 0; } -static int si_set_temperature_range(struct amdgpu_device *adev) -{ - int ret; - - ret = si_thermal_enable_alert(adev, false); - if (ret) - return ret; - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = si_thermal_enable_alert(adev, true); - if (ret) - return ret; - - return ret; -} - static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, static int si_dpm_late_init(void *handle) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - ret = si_set_temperature_range(adev); - if (ret) - return ret; -#if 0 //TODO ? - si_dpm_powergate_uvd(adev, true); -#endif return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 5633c5797e85..2ddf5198e5c4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -733,6 +733,24 @@ static int smu_late_init(void *handle) return ret; } + /* + * Explicitly notify PMFW the power mode the system in. Since + * the PMFW may boot the ASIC with a different mode. + * For those supporting ACDC switch via gpio, PMFW will + * handle the switch automatically. 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 5633c5797e85..2ddf5198e5c4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -733,6 +733,24 @@ static int smu_late_init(void *handle)
 		return ret;
 	}
 
+	/*
+	 * Explicitly notify the PMFW of the power mode the system is in,
+	 * since the PMFW may boot the ASIC in a different mode.
+	 * For ASICs supporting the AC/DC switch via GPIO, the PMFW handles
+	 * the switch automatically and driver involvement is unnecessary.
+	 */
+	if (!smu->dc_controlled_by_gpio) {
+		ret = smu_set_power_source(smu,
+					   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
+					   SMU_POWER_SOURCE_DC);
+		if (ret) {
+			dev_err(adev->dev, "Failed to switch to %s mode!\n",
+				adev->pm.ac_power ? "AC" : "DC");
+			return ret;
+		}
+	}
+
 	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
 	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
 		return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index c4000518dc56..275f708db636 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3413,26 +3413,8 @@ static int navi10_post_smu_init(struct smu_context *smu)
 		return 0;
 
 	ret = navi10_run_umc_cdr_workaround(smu);
-	if (ret) {
+	if (ret)
 		dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
-		return ret;
-	}
-
-	if (!smu->dc_controlled_by_gpio) {
-		/*
-		 * For Navi1X, manually switch it to AC mode as PMFW
-		 * may boot it with DC mode.
-		 */
-		ret = smu_v11_0_set_power_source(smu,
-						 adev->pm.ac_power ?
-						 SMU_POWER_SOURCE_AC :
-						 SMU_POWER_SOURCE_DC);
-		if (ret) {
-			dev_err(adev->dev, "Failed to switch to %s mode!\n",
-				adev->pm.ac_power ? "AC" : "DC");
-			return ret;
-		}
-	}
 
 	return ret;
 }
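The net effect of the two hunks above is to hoist the Navi1x-only AC/DC notification into common SMU code. A sketch of the generalized decision, using stand-in names in place of smu_set_power_source() and the smu_context fields:

enum power_source { POWER_SOURCE_AC, POWER_SOURCE_DC };

struct fw_ctx {
	int dc_controlled_by_gpio;	/* firmware switches AC/DC on its own */
	int ac_power;			/* power source the system booted on */
};

/* stand-in for the real firmware message */
static int notify_power_source(struct fw_ctx *fw, enum power_source src)
{
	(void)fw;
	(void)src;
	return 0;
}

static int late_init_power_source(struct fw_ctx *fw)
{
	if (fw->dc_controlled_by_gpio)
		return 0;	/* nothing to do, firmware handles the switch */

	return notify_power_source(fw, fw->ac_power ? POWER_SOURCE_AC
						    : POWER_SOURCE_DC);
}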
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 75f18681e984..85d53597eb07 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
 	return ret;
 }
 
+static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
+						      uint32_t *gen_speed_override,
+						      uint32_t *lane_width_override)
+{
+	struct amdgpu_device *adev = smu->adev;
+
+	*gen_speed_override = 0xff;
+	*lane_width_override = 0xff;
+
+	switch (adev->pdev->device) {
+	case 0x73A0:
+	case 0x73A1:
+	case 0x73A2:
+	case 0x73A3:
+	case 0x73AB:
+	case 0x73AE:
+		/* Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32 */
+		*lane_width_override = 6;
+		break;
+	case 0x73E0:
+	case 0x73E1:
+	case 0x73E3:
+		*lane_width_override = 4;
+		break;
+	case 0x7420:
+	case 0x7421:
+	case 0x7422:
+	case 0x7423:
+	case 0x7424:
+		*lane_width_override = 3;
+		break;
+	default:
+		break;
+	}
+}
+
+#define MAX(a, b)	((a) > (b) ? (a) : (b))
+
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 					 uint32_t pcie_gen_cap,
 					 uint32_t pcie_width_cap)
 {
 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-
-	uint32_t smu_pcie_arg;
+	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+	uint32_t gen_speed_override, lane_width_override;
 	uint8_t *table_member1, *table_member2;
+	uint32_t min_gen_speed, max_gen_speed;
+	uint32_t min_lane_width, max_lane_width;
+	uint32_t smu_pcie_arg;
 	int ret, i;
 
 	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
 	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
 
-	/* lclk dpm table setup */
-	for (i = 0; i < MAX_PCIE_CONF; i++) {
-		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
-		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
+	sienna_cichlid_get_override_pcie_settings(smu,
+						  &gen_speed_override,
+						  &lane_width_override);
+
+	/* PCIE gen speed override */
+	if (gen_speed_override != 0xff) {
+		min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+		max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+	} else {
+		min_gen_speed = MAX(0, table_member1[0]);
+		max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+		min_gen_speed = min_gen_speed > max_gen_speed ?
+				max_gen_speed : min_gen_speed;
 	}
+	pcie_table->pcie_gen[0] = min_gen_speed;
+	pcie_table->pcie_gen[1] = max_gen_speed;
+
+	/* PCIE lane width override */
+	if (lane_width_override != 0xff) {
+		min_lane_width = MIN(pcie_width_cap, lane_width_override);
+		max_lane_width = MIN(pcie_width_cap, lane_width_override);
+	} else {
+		min_lane_width = MAX(1, table_member2[0]);
+		max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+		min_lane_width = min_lane_width > max_lane_width ?
+				 max_lane_width : min_lane_width;
+	}
+	pcie_table->pcie_lane[0] = min_lane_width;
+	pcie_table->pcie_lane[1] = max_lane_width;
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
-		smu_pcie_arg = (i << 16) |
-			((table_member1[i] <= pcie_gen_cap) ?
-			 (table_member1[i] << 8) :
-			 (pcie_gen_cap << 8)) |
-			((table_member2[i] <= pcie_width_cap) ?
-			 table_member2[i] :
-			 pcie_width_cap);
+		smu_pcie_arg = (i << 16 |
+				pcie_table->pcie_gen[i] << 8 |
+				pcie_table->pcie_lane[i]);
 
 		ret = smu_cmn_send_smc_msg_with_param(smu,
 				SMU_MSG_OverridePcieParameters,
@@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 				NULL);
 		if (ret)
 			return ret;
-
-		if (table_member1[i] > pcie_gen_cap)
-			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
-		if (table_member2[i] > pcie_width_cap)
-			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
 	}
 
 	return 0;
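The override logic above pins the PCIe DPM table to a fixed lane width on the affected SKUs while still honoring the platform cap. The lane-width arm, reduced to plain arithmetic for inspection; MIN/MAX are assumed to behave like the driver's macros, and 0xff means no override:

#include <stdint.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

static void clamp_lane_width(uint32_t cap, uint32_t override,
			     uint32_t tbl_min, uint32_t tbl_max,
			     uint32_t *out_min, uint32_t *out_max)
{
	if (override != 0xff) {
		/* pin both ends to the override, still honoring the cap */
		*out_min = MIN(cap, override);
		*out_max = MIN(cap, override);
	} else {
		*out_min = MAX(1, tbl_min);	/* never below x1 */
		*out_max = MIN(cap, tbl_max);
		if (*out_min > *out_max)	/* keep the range well-formed */
			*out_min = *out_max;
	}
}

The gen-speed arm in the patch is identical in shape, with a floor of 0 instead of 1.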
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 7433dcaa16e0..067b4e0b026c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 	SmuMetrics_legacy_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	bool cur_value_match_level = false;
 
@@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_FCLK:
 		for (i = 0; i < count; i++) {
-			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
@@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 	SmuMetrics_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	bool cur_value_match_level = false;
 	uint32_t min, max;
 
@@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
 	case SMU_MCLK:
 	case SMU_FCLK:
 		for (i = 0; i < count; i++) {
-			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 5cdc07165480..8a8ba25c9ad7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
 static int renoir_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 	SmuMetrics_t metrics;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_VCLK:
 	case SMU_DCLK:
 		for (i = 0; i < count; i++) {
-			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
 			if (ret)
 				return ret;
 			if (!value)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 393c6a7b9609..ca379181081c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -573,11 +573,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
 	if (smu_power->power_context || smu_power->power_context_size != 0)
 		return -EINVAL;
 
-	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
+	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
 					   GFP_KERNEL);
 	if (!smu_power->power_context)
 		return -ENOMEM;
-	smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
+	smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
 
 	return 0;
 }
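The smu_v13_0_init_power() hunk above fixes a size mismatch: the buffer holds a struct smu_13_0_power_context but was sized for struct smu_13_0_dpm_context. One way to keep allocation size and pointer type in sync is the sizeof(*ptr) idiom; a sketch under assumed names (the helper and its container struct are illustrative, not the driver's):

static int init_power_context(struct smu_power *smu_power)
{
	struct smu_13_0_power_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);	/* size tracks the type of ctx */
	if (!ctx)
		return -ENOMEM;

	smu_power->power_context = ctx;
	smu_power->power_context_size = sizeof(*ctx);
	return 0;
}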
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 8fa9a36c38b6..6d9760eac16d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
 static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
 					enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
 
@@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
 		break;
 
 	for (i = 0; i < count; i++) {
-		ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
+		idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+		ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 		if (ret)
 			break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 66445964efbd..0081fa607e02 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -866,7 +866,7 @@ out:
 static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
 				enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min = 0, max = 0;
 
@@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
 		goto print_clk_out;
 
 	for (i = 0; i < count; i++) {
-		ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
+		idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+		ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 		if (ret)
 			goto print_clk_out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 3d9ff46706fb..bba621615abf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -125,6 +125,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(ArmD3,			PPSMC_MSG_ArmD3,                  0),
 	MSG_MAP(AllowGpo,		PPSMC_MSG_SetGpoAllow,            0),
 	MSG_MAP(GetPptLimit,		PPSMC_MSG_GetPptLimit,            0),
+	MSG_MAP(NotifyPowerSource,	PPSMC_MSG_NotifyPowerSource,      0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1770,6 +1771,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
 	.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
 	.get_power_limit = smu_v13_0_7_get_power_limit,
 	.set_power_limit = smu_v13_0_set_power_limit,
+	.set_power_source = smu_v13_0_set_power_source,
 	.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
 	.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
 	.set_tool_table_location = smu_v13_0_set_tool_table_location,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 04e56b0b3033..798f36cfcebd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1000,7 +1000,7 @@ out:
 static int yellow_carp_print_clk_levels(struct smu_context *smu,
 				enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
 
@@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
 		goto print_clk_out;
 
 	for (i = 0; i < count; i++) {
-		ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
+		idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+		ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 		if (ret)
 			goto print_clk_out;
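All of the idx = (count - i - 1) hunks in this series implement the same reordering: these platforms report MCLK/FCLK DPM levels from highest to lowest, while the sysfs clock-level output is printed in ascending order (smu_v13_0_5 applies the flip to SMU_MCLK only). A standalone demonstration of the index flip, with made-up level values:

#include <stdio.h>

int main(void)
{
	unsigned int levels[] = { 1600, 1200, 800, 400 };	/* fw order: high -> low */
	unsigned int count = sizeof(levels) / sizeof(levels[0]);
	unsigned int i;

	for (i = 0; i < count; i++) {
		unsigned int idx = count - i - 1;	/* walk the table backwards */
		printf("%u: %u MHz\n", i, levels[idx]);	/* prints 400, 800, 1200, 1600 */
	}
	return 0;
}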