author     Dave Airlie <airlied@redhat.com>  2023-05-11 23:46:33 +0300
committer  Dave Airlie <airlied@redhat.com>  2023-05-11 23:46:34 +0300
commit     d8843eebbbd15b78c6a7745717b3705eca923b0f (patch)
tree       efc3f4020e9e2f52a8ea7279c0eb139582879007
parent     9235c21c37facd131b4d126ce7535ca573f850e3 (diff)
parent     996e93a3fe74dcf9d467ae3020aea42cc3ff65e3 (diff)
download   linux-d8843eebbbd15b78c6a7745717b3705eca923b0f.tar.xz
Merge tag 'amd-drm-fixes-6.4-2023-05-11' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes
amdgpu:
- VCN3 fixes
- APUs always support PCI atomics
- Legacy power management fixes
- DCN 3.1.4 fix
- DCFCLK fix
- Fix several RAS irq refcount mismatches
- GPU Reset fix
- GFX 11.0.4 fix
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230511141755.7896-1-alexander.deucher@amd.com
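Editor's note: the "APUs always support PCI atomics" change below makes amdgpu_device_init() treat atomics as available on APUs with GFX9-class or newer graphics IP instead of probing the PCIe root port. The stand-alone C sketch that follows only illustrates the order of those checks; the struct, field, and demo_* names are hypothetical and simplified (the real code compares against IP_VERSION(9, 0, 0)), so it is not the kernel implementation itself.

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical, simplified device state used only for this sketch. */
  struct demo_device {
      bool is_sriov_vf;               /* running as an SR-IOV virtual function */
      bool is_apu;                    /* integrated GPU */
      int  gc_major;                  /* GFX IP major version: 9, 10, 11, ... */
      bool host_reports_both_atomics; /* PF2VF report: COMP32 and COMP64 */
      bool pcie_root_atomics_ok;      /* result of probing the PCIe root port */
  };

  /* Mirrors the decision order in the amdgpu_device.c hunk:
   * 1) SR-IOV guests trust the capability flags reported by the host,
   * 2) APUs with GFX9 or newer reach memory through an internal path and
   *    are assumed to support atomics natively,
   * 3) everything else probes PCIe atomic routing to the root port.
   */
  static bool demo_have_atomics_support(const struct demo_device *dev)
  {
      if (dev->is_sriov_vf)
          return dev->host_reports_both_atomics;
      if (dev->is_apu && dev->gc_major >= 9)
          return true;
      return dev->pcie_root_atomics_ok;
  }

  int main(void)
  {
      struct demo_device apu = { .is_apu = true, .gc_major = 10 };

      printf("APU atomics support: %d\n", demo_have_atomics_support(&apu));
      return 0;
  }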
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c                     | 12
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c                        |  8
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c                         | 46
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c                          |  3
 drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c                         |  1
 drivers/gpu/drm/amd/amdgpu/nv.c                                | 22
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c                         |  8
 drivers/gpu/drm/amd/amdgpu/soc21.c                             |  2
 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c           | 65
 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h           |  2
 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c            |  2
 drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c |  5
 drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h |  1
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c                            | 25
 14 files changed, 131 insertions(+), 71 deletions(-)
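Editor's note: several hunks in the diff below (amdgpu_gfx.c, gfx_v9_0.c, gfx_v11_0.c, and sdma_v4_0.c) share one theme: an interrupt reference must only be dropped in hw_fini if the matching reference was actually taken at init, otherwise the refcount underflows and the driver warns on unload. The small C program below is only a sketch of that pairing; the demo_* names and the assert-based refcount are illustrative, not the amdgpu IRQ code.

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Tiny stand-in for an amdgpu IRQ source with a per-source refcount. */
  struct demo_irq_src {
      bool have_funcs;   /* stands in for src->funcs being installed */
      int  refcount;
  };

  static void demo_irq_get(struct demo_irq_src *src)
  {
      src->refcount++;
  }

  static void demo_irq_put(struct demo_irq_src *src)
  {
      assert(src->refcount > 0);   /* underflow here is the bug being fixed */
      src->refcount--;
  }

  /* Init: take the ECC interrupt reference only when RAS is supported and
   * the source was registered (compare amdgpu_gfx_ras_late_init()). */
  static void demo_hw_init(struct demo_irq_src *ecc_irq, bool ras_supported)
  {
      if (ras_supported && ecc_irq->have_funcs)
          demo_irq_get(ecc_irq);
  }

  /* Teardown: drop the reference under the same condition it was taken,
   * so get/put stay balanced (compare gfx_v9_0_hw_fini() and
   * sdma_v4_0_hw_fini() after this series). */
  static void demo_hw_fini(struct demo_irq_src *ecc_irq, bool ras_supported)
  {
      if (ras_supported)
          demo_irq_put(ecc_irq);
  }

  int main(void)
  {
      struct demo_irq_src ecc = { .have_funcs = true, .refcount = 0 };

      demo_hw_init(&ecc, false);   /* no RAS: no reference taken ... */
      demo_hw_fini(&ecc, false);   /* ... and none dropped */
      printf("ecc refcount after fini: %d\n", ecc.refcount);
      return 0;
  }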
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 981a9cfb63b5..5c7d40873ee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3757,6 +3757,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
         adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
             adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
             (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+    /* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a
+     * internal path natively support atomics, set have_atomics_support to true.
+     */
+    else if ((adev->flags & AMD_IS_APU) &&
+         (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
+        adev->have_atomics_support = true;
     else
         adev->have_atomics_support =
             !pci_enable_atomic_ops_to_root(adev->pdev,
@@ -4506,7 +4512,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
     dev_info(adev->dev, "recover vram bo from shadow start\n");
     mutex_lock(&adev->shadow_list_lock);
     list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
-        shadow = &vmbo->bo;
+        /* If vm is compute context or adev is APU, shadow will be NULL */
+        if (!vmbo->shadow)
+            continue;
+        shadow = vmbo->shadow;
+
         /* No need to recover an evicted BO */
         if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
             shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9d3a0542c996..f3f541ba0aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -687,9 +687,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
         if (r)
             return r;
 
-        r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
-        if (r)
-            goto late_fini;
+        if (adev->gfx.cp_ecc_error_irq.funcs) {
+            r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+            if (r)
+                goto late_fini;
+        }
     } else {
         amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
     }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index a9da0486467a..f5c376276984 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1315,13 +1315,6 @@ static int gfx_v11_0_sw_init(void *handle)
     if (r)
         return r;
 
-    /* ECC error */
-    r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
-                  GFX_11_0_0__SRCID__CP_ECC_ERROR,
-                  &adev->gfx.cp_ecc_error_irq);
-    if (r)
-        return r;
-
     /* FED error */
     r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
                   GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
@@ -4444,7 +4437,6 @@ static int gfx_v11_0_hw_fini(void *handle)
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     int r;
 
-    amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
     amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
     amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
@@ -5897,36 +5889,6 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
     }
 }
 
-#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1
-#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \
-    do { \
-        uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \
-        tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \
-        WREG32_SOC15_IP(GC, reg_addr, tmp); \
-    } while (0)
-
-static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
-                        struct amdgpu_irq_src *source,
-                        unsigned type,
-                        enum amdgpu_interrupt_state state)
-{
-    uint32_t ecc_irq_state = 0;
-    uint32_t pipe0_int_cntl_addr = 0;
-    int i = 0;
-
-    ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0;
-
-    pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
-
-    WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state);
-
-    for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++)
-        SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL,
-                      ecc_irq_state);
-
-    return 0;
-}
-
 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
                          struct amdgpu_irq_src *src,
                          unsigned type,
@@ -6341,11 +6303,6 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
     .process = gfx_v11_0_priv_inst_irq,
 };
 
-static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = {
-    .set = gfx_v11_0_set_cp_ecc_error_state,
-    .process = amdgpu_gfx_cp_ecc_error_irq,
-};
-
 static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
     .process = gfx_v11_0_rlc_gc_fed_irq,
 };
@@ -6361,9 +6318,6 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
     adev->gfx.priv_inst_irq.num_types = 1;
     adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
 
-    adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */
-    adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs;
-
     adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
     adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index adbcd8127c82..f46d4b18a3fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3764,7 +3764,8 @@ static int gfx_v9_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+        amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
     amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
     amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index c55e09432e26..1c2292cc5f2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle)
 
     switch (adev->ip_versions[UVD_HWIP][0]) {
     case IP_VERSION(3, 1, 1):
+    case IP_VERSION(3, 1, 2):
         break;
     default:
         harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 98c826f1f89b..0fb6013441f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -98,6 +98,16 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
 };
 
 /* Sienna Cichlid */
+static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
+    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
+    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_encode = {
+    .codec_count = ARRAY_SIZE(sc_video_codecs_encode_array),
+    .codec_array = sc_video_codecs_encode_array,
+};
+
 static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
 {
     {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
@@ -136,8 +146,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
 /* SRIOV Sienna Cichlid, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
 {
-    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
+    {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
 };
 
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
@@ -237,12 +247,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
         } else {
             if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
                 if (encode)
-                    *codecs = &nv_video_codecs_encode;
+                    *codecs = &sc_video_codecs_encode;
                 else
                     *codecs = &sc_video_codecs_decode_vcn1;
             } else {
                 if (encode)
-                    *codecs = &nv_video_codecs_encode;
+                    *codecs = &sc_video_codecs_encode;
                 else
                     *codecs = &sc_video_codecs_decode_vcn0;
             }
@@ -251,14 +261,14 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
     case IP_VERSION(3, 0, 16):
     case IP_VERSION(3, 0, 2):
         if (encode)
-            *codecs = &nv_video_codecs_encode;
+            *codecs = &sc_video_codecs_encode;
         else
             *codecs = &sc_video_codecs_decode_vcn0;
         return 0;
     case IP_VERSION(3, 1, 1):
     case IP_VERSION(3, 1, 2):
         if (encode)
-            *codecs = &nv_video_codecs_encode;
+            *codecs = &sc_video_codecs_encode;
         else
             *codecs = &yc_video_codecs_decode;
         return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index b3cc04dd8653..9295ac7edd56 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1917,9 +1917,11 @@ static int sdma_v4_0_hw_fini(void *handle)
         return 0;
     }
 
-    for (i = 0; i < adev->sdma.num_instances; i++) {
-        amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
-                   AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+        for (i = 0; i < adev->sdma.num_instances; i++) {
+            amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+                       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+        }
     }
 
     sdma_v4_0_ctx_switch_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 744be2a05623..d77162536514 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -711,7 +711,7 @@ static int soc21_common_early_init(void *handle)
             AMD_PG_SUPPORT_VCN_DPG |
             AMD_PG_SUPPORT_GFX_PG |
             AMD_PG_SUPPORT_JPEG;
-        adev->external_rev_id = adev->rev_id + 0x1;
+        adev->external_rev_id = adev->rev_id + 0x80;
         break;
     default:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 40c488b26901..cc3fe9cac5b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -423,3 +423,68 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
     PERF_TRACE();
 }
+
+static void apply_symclk_on_tx_off_wa(struct dc_link *link)
+{
+    /* There are use cases where SYMCLK is referenced by OTG. For instance
+     * for TMDS signal, OTG relies SYMCLK even if TX video output is off.
+     * However current link interface will power off PHY when disabling link
+     * output. This will turn off SYMCLK generated by PHY. The workaround is
+     * to identify such case where SYMCLK is still in use by OTG when we
+     * power off PHY. When this is detected, we will temporarily power PHY
+     * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling
+     * program_pix_clk interface. When OTG is disabled, we will then power
+     * off PHY by calling disable link output again.
+     *
+     * In future dcn generations, we plan to rework transmitter control
+     * interface so that we could have an option to set SYMCLK ON TX OFF
+     * state in one step without this workaround
+     */
+
+    struct dc *dc = link->ctx->dc;
+    struct pipe_ctx *pipe_ctx = NULL;
+    uint8_t i;
+
+    if (link->phy_state.symclk_ref_cnts.otg > 0) {
+        for (i = 0; i < MAX_PIPES; i++) {
+            pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+            if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+                pipe_ctx->clock_source->funcs->program_pix_clk(
+                        pipe_ctx->clock_source,
+                        &pipe_ctx->stream_res.pix_clk_params,
+                        dc->link_srv->dp_get_encoding_format(
+                                &pipe_ctx->link_config.dp_link_settings),
+                        &pipe_ctx->pll_settings);
+                link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+                break;
+            }
+        }
+    }
+}
+
+void dcn314_disable_link_output(struct dc_link *link,
+        const struct link_resource *link_res,
+        enum signal_type signal)
+{
+    struct dc *dc = link->ctx->dc;
+    const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+    struct dmcu *dmcu = dc->res_pool->dmcu;
+
+    if (signal == SIGNAL_TYPE_EDP &&
+            link->dc->hwss.edp_backlight_control)
+        link->dc->hwss.edp_backlight_control(link, false);
+    else if (dmcu != NULL && dmcu->funcs->lock_phy)
+        dmcu->funcs->lock_phy(dmcu);
+
+    link_hwss->disable_link_output(link, link_res, signal);
+    link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+
+    /*
+     * Add the logic to extract BOTH power up and power down sequences
+     * from enable/disable link output and only call edp panel control
+     * in enable_link_dp and disable_link_dp once.
+     */
+    if (dmcu != NULL && dmcu->funcs->lock_phy)
+        dmcu->funcs->unlock_phy(dmcu);
+
+    dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
+    apply_symclk_on_tx_off_wa(link);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index c786d5e6a428..6d0b62503caa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -45,4 +45,6 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
 
 void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
 
+void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+
 #endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index 5267e901a35c..a588f46b166f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -105,7 +105,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
     .enable_lvds_link_output = dce110_enable_lvds_link_output,
     .enable_tmds_link_output = dce110_enable_tmds_link_output,
     .enable_dp_link_output = dce110_enable_dp_link_output,
-    .disable_link_output = dce110_disable_link_output,
+    .disable_link_output = dcn314_disable_link_output,
     .z10_restore = dcn31_z10_restore,
     .z10_save_init = dcn31_z10_save_init,
     .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 13c7e7394b1c..d75248b6cae9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -810,7 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                 v->SwathHeightY[k],
                 v->SwathHeightC[k],
                 TWait,
-                v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
+                (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
+                    v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
                     mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
                 /* Output */
                 &v->DSTXAfterScaler[k],
@@ -3310,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                     v->swath_width_chroma_ub_this_state[k],
                     v->SwathHeightYThisState[k],
                     v->SwathHeightCThisState[k], v->TWait,
-                    v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
+                    (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
                         mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
                     /* Output */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index 500b3dd6052d..d98e36a9a09c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -53,6 +53,7 @@
 #define BPP_BLENDED_PIPE 0xffffffff
 
 #define MEM_STROBE_FREQ_MHZ 1600
+#define MIN_DCFCLK_FREQ_MHZ 200
 #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
 
 struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 300e156b924f..078aaaa53162 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -36,6 +36,8 @@
 #define amdgpu_dpm_enable_bapm(adev, e) \
         ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
 
+#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
+
 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
     const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1460,15 +1462,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
 
 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
 {
-    struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-    struct smu_context *smu = adev->powerplay.pp_handle;
+    if (is_support_sw_smu(adev)) {
+        struct smu_context *smu = adev->powerplay.pp_handle;
 
-    if ((is_support_sw_smu(adev) && smu->od_enabled) ||
-        (is_support_sw_smu(adev) && smu->is_apu) ||
-        (!is_support_sw_smu(adev) && hwmgr->od_enabled))
-        return true;
+        return (smu->od_enabled || smu->is_apu);
+    } else {
+        struct pp_hwmgr *hwmgr;
 
-    return false;
+        /*
+         * dpm on some legacy asics don't carry od_enabled member
+         * as its pp_handle is casted directly from adev.
+         */
+        if (amdgpu_dpm_is_legacy_dpm(adev))
+            return false;
+
+        hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
+
+        return hwmgr->od_enabled;
+    }
 }
 
 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
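Editor's note: the legacy power management fix in amdgpu_dpm.c above relies on one detail: on some legacy ASICs the powerplay handle is simply the adev pointer itself, so it cannot be dereferenced as a struct pp_hwmgr and carries no od_enabled member. The sketch below reproduces only that pointer-identity check with made-up demo_* types; the real structures are struct amdgpu_device, struct smu_context, and struct pp_hwmgr.

  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative-only structures standing in for the kernel ones. */
  struct demo_smu   { bool od_enabled; bool is_apu; };
  struct demo_hwmgr { bool od_enabled; };

  struct demo_adev {
      void *pp_handle;   /* smu, hwmgr, or the adev itself for legacy dpm */
      bool  sw_smu;      /* stands in for is_support_sw_smu() */
  };

  /* Same trick as the new amdgpu_dpm_is_legacy_dpm() macro: legacy dpm
   * stores the adev pointer itself as pp_handle, so pointer identity
   * detects it without touching a struct that does not exist there. */
  static bool demo_is_legacy_dpm(const struct demo_adev *adev)
  {
      return adev->pp_handle == (const void *)adev;
  }

  static bool demo_is_overdrive_supported(const struct demo_adev *adev)
  {
      if (adev->sw_smu) {
          const struct demo_smu *smu = adev->pp_handle;

          return smu->od_enabled || smu->is_apu;
      }

      if (demo_is_legacy_dpm(adev))
          return false;   /* no od_enabled member to read */

      return ((const struct demo_hwmgr *)adev->pp_handle)->od_enabled;
  }

  int main(void)
  {
      struct demo_adev legacy = { .sw_smu = false };

      legacy.pp_handle = &legacy;   /* how legacy dpm sets up its handle */
      printf("overdrive supported: %d\n", demo_is_overdrive_supported(&legacy));
      return 0;
  }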