summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c529
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c135
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c146
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_1.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc_v1_0.c50
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c301
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_trace.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h263
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c201
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c158
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c421
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c553
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c177
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c390
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h71
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_offset.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_sh_mask.h52
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h9
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_offset.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_sh_mask.h105
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h13
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c42
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h5
-rw-r--r--include/drm/drm_fb_helper.h2
163 files changed, 3797 insertions, 2501 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 006d49d6b4af..9c9c73b73ac8 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -86,7 +86,7 @@ amdgpu-y += \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
- cyan_skillfish_reg_init.o soc_v1_0.o
+ cyan_skillfish_reg_init.o soc_v1_0.o lsdma_v7_1.o
# add DF block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index d1bf2e150c1a..aabe9d58c3dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -246,10 +246,10 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
return NULL;
}
-static struct edid *
+static const struct drm_edid *
amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
{
- return drm_edid_duplicate(drm_edid_raw(adev->mode_info.bios_hardcoded_edid));
+ return drm_edid_dup(adev->mode_info.bios_hardcoded_edid);
}
static void amdgpu_connector_get_edid(struct drm_connector *connector)
@@ -268,8 +268,8 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if ((amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) &&
amdgpu_connector->ddc_bus->has_aux) {
- amdgpu_connector->edid = drm_get_edid(connector,
- &amdgpu_connector->ddc_bus->aux.ddc);
+ amdgpu_connector->edid = drm_edid_read_ddc(connector,
+ &amdgpu_connector->ddc_bus->aux.ddc);
} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
@@ -277,14 +277,14 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
amdgpu_connector->ddc_bus->has_aux)
- amdgpu_connector->edid = drm_get_edid(connector,
- &amdgpu_connector->ddc_bus->aux.ddc);
+ amdgpu_connector->edid = drm_edid_read_ddc(connector,
+ &amdgpu_connector->ddc_bus->aux.ddc);
else if (amdgpu_connector->ddc_bus)
- amdgpu_connector->edid = drm_get_edid(connector,
- &amdgpu_connector->ddc_bus->adapter);
+ amdgpu_connector->edid = drm_edid_read_ddc(connector,
+ &amdgpu_connector->ddc_bus->adapter);
} else if (amdgpu_connector->ddc_bus) {
- amdgpu_connector->edid = drm_get_edid(connector,
- &amdgpu_connector->ddc_bus->adapter);
+ amdgpu_connector->edid = drm_edid_read_ddc(connector,
+ &amdgpu_connector->ddc_bus->adapter);
}
if (!amdgpu_connector->edid) {
@@ -292,30 +292,22 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP))) {
amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
- drm_connector_update_edid_property(connector, amdgpu_connector->edid);
+ drm_edid_connector_update(connector, amdgpu_connector->edid);
}
}
}
-static void amdgpu_connector_free_edid(struct drm_connector *connector)
-{
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- kfree(amdgpu_connector->edid);
- amdgpu_connector->edid = NULL;
-}
-
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
int ret;
if (amdgpu_connector->edid) {
- drm_connector_update_edid_property(connector, amdgpu_connector->edid);
- ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
+ drm_edid_connector_update(connector, amdgpu_connector->edid);
+ ret = drm_edid_connector_add_modes(connector);
return ret;
}
- drm_connector_update_edid_property(connector, NULL);
+ drm_edid_connector_update(connector, NULL);
return 0;
}
@@ -754,7 +746,7 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -873,7 +865,7 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
if (dret) {
amdgpu_connector->detected_by_load = false;
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
@@ -883,13 +875,13 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
} else {
amdgpu_connector->use_digital =
- !!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ drm_edid_is_digital(amdgpu_connector->edid);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (amdgpu_connector->use_digital && amdgpu_connector->shared_ddc) {
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
ret = connector_status_disconnected;
} else {
ret = connector_status_connected;
@@ -984,7 +976,7 @@ static void amdgpu_connector_shared_ddc(enum drm_connector_status *status,
/* hpd is our only option in this case */
if (!amdgpu_display_hpd_sense(adev,
amdgpu_connector->hpd.hpd)) {
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
*status = connector_status_disconnected;
}
}
@@ -1053,7 +1045,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
if (dret) {
amdgpu_connector->detected_by_load = false;
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
@@ -1063,13 +1055,13 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
broken_edid = true; /* defer use_digital to later */
} else {
amdgpu_connector->use_digital =
- !!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ drm_edid_is_digital(amdgpu_connector->edid);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!amdgpu_connector->use_digital) && amdgpu_connector->shared_ddc) {
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
ret = connector_status_disconnected;
} else {
ret = connector_status_connected;
@@ -1417,7 +1409,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
goto out;
}
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index b42f866935ab..aa9239b310a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -39,6 +39,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_psp_ta.h"
+#include "amdgpu_userq.h"
#if defined(CONFIG_DEBUG_FS)
@@ -2156,6 +2157,53 @@ static const struct file_operations amdgpu_pt_info_fops = {
.release = single_release,
};
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_userq_init(struct drm_file *file, struct amdgpu_usermode_queue *queue, int qid)
+{
+ char queue_name[32];
+
+ scnprintf(queue_name, sizeof(queue_name), "queue_%d", qid);
+ queue->debugfs_queue = debugfs_create_dir(queue_name, file->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+}
+
void amdgpu_debugfs_vm_init(struct drm_file *file)
{
debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
@@ -2174,4 +2222,9 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
void amdgpu_debugfs_vm_init(struct drm_file *file)
{
}
+void amdgpu_debugfs_userq_init(struct drm_file *file,
+ struct amdgpu_usermode_queue *queue,
+ int qid)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index e7b3c38e5186..e88b4a1e564c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -25,6 +25,7 @@
/*
* Debugfs
*/
+struct amdgpu_usermode_queue;
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
@@ -34,4 +35,7 @@ void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
void amdgpu_debugfs_vm_init(struct drm_file *file);
+void amdgpu_debugfs_userq_init(struct drm_file *file,
+ struct amdgpu_usermode_queue *queue,
+ int qid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index c38e7371bafc..160f0704d1d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -261,6 +261,8 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
}
}
+ amdgpu_discovery_dump(coredump->adev, &p);
+
/* IP firmware information */
drm_printf(&p, "\nIP Firmwares\n");
amdgpu_devcoredump_fw_info(coredump->adev, &p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6f6973e8cd53..711b4502653a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1995,8 +1995,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
default:
r = amdgpu_discovery_set_ip_blocks(adev);
- if (r)
+ if (r) {
+ adev->num_ip_blocks = 0;
return r;
+ }
break;
}
@@ -2550,6 +2552,8 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
/* skip CG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
@@ -2589,6 +2593,8 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
/* skip PG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
@@ -2796,6 +2802,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].version)
+ continue;
if (!adev->ip_blocks[i].version->funcs->early_fini)
continue;
@@ -2873,6 +2881,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.sw)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_ucode_free_bo(adev);
amdgpu_free_static_csa(&adev->virt.csa_obj);
@@ -2899,6 +2909,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
if (adev->ip_blocks[i].version->funcs->late_fini)
adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.late_initialized = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 6c8b3c2687dc..f9f785c5d8ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -87,6 +87,7 @@
#include "sdma_v7_1.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
+#include "lsdma_v7_1.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
@@ -132,6 +133,7 @@ MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
+/* Note: These registers are consistent across all the SOCs */
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMP0_SMN_C2PMSG_33 0x16061
@@ -139,6 +141,10 @@ MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmDRIVER_SCRATCH_0 0x94
+#define mmDRIVER_SCRATCH_1 0x95
+#define mmDRIVER_SCRATCH_2 0x96
+
static const char *hw_id_names[HW_ID_MAX] = {
[MP1_HWID] = "MP1",
[MP2_HWID] = "MP2",
@@ -253,39 +259,12 @@ static int hw_id_map[MAX_HWIP] = {
[ATU_HWIP] = ATU_HWID,
};
-static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_get_tmr_info(struct amdgpu_device *adev,
+ bool *is_tmr_in_sysmem)
{
- u64 tmr_offset, tmr_size, pos;
- void *discv_regn;
- int ret;
-
- ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
- if (ret)
- return ret;
-
- pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
-
- /* This region is read-only and reserved from system use */
- discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
- if (discv_regn) {
- memcpy(binary, discv_regn, adev->discovery.size);
- memunmap(discv_regn);
- return 0;
- }
-
- return -ENOENT;
-}
-
-#define IP_DISCOVERY_V2 2
-#define IP_DISCOVERY_V4 4
-
-static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
- uint8_t *binary)
-{
- bool sz_valid = true;
- uint64_t vram_size;
- int i, ret = 0;
- u32 msg;
+ u64 vram_size, tmr_offset, tmr_size;
+ u32 msg, tmr_offset_lo, tmr_offset_hi;
+ int i, ret;
if (!amdgpu_sriov_vf(adev)) {
/* It can take up to two second for IFWI init to complete on some dGPUs,
@@ -305,51 +284,98 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
}
vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
- if (!vram_size || vram_size == U32_MAX)
- sz_valid = false;
+ if (vram_size == U32_MAX)
+ return -ENXIO;
+ else if (!vram_size)
+ *is_tmr_in_sysmem = true;
else
- vram_size <<= 20;
+ *is_tmr_in_sysmem = false;
- /*
- * If in VRAM, discovery TMR is marked for reservation. If it is in system mem,
- * then it is not required to be reserved.
- */
- if (sz_valid) {
- if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
- /* For SRIOV VFs with dynamic critical region enabled,
- * we will get the IPD binary via below call.
- * If dynamic critical is disabled, fall through to normal seq.
- */
- if (amdgpu_virt_get_dynamic_data_info(adev,
- AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
- &adev->discovery.size)) {
- dev_err(adev->dev,
- "failed to read discovery info from dynamic critical region.");
- ret = -EINVAL;
- goto exit;
- }
- } else {
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+ /* init the default tmr size and offset */
+ adev->discovery.size = DISCOVERY_TMR_SIZE;
+ if (vram_size)
+ adev->discovery.offset = (vram_size << 20) - DISCOVERY_TMR_OFFSET;
+
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ adev->discovery.offset =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset;
+ adev->discovery.size =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb << 10;
+ if (!adev->discovery.offset || !adev->discovery.size)
+ return -EINVAL;
+ } else {
+ tmr_size = RREG32(mmDRIVER_SCRATCH_2);
+ if (tmr_size) {
+ /* It's preferred to transition to PSP mailbox reg interface
+ * for both bare-metal and passthrough if available */
+ adev->discovery.size = (u32)tmr_size;
+ tmr_offset_lo = RREG32(mmDRIVER_SCRATCH_0);
+ tmr_offset_hi = RREG32(mmDRIVER_SCRATCH_1);
+ adev->discovery.offset = ((u64)le32_to_cpu(tmr_offset_hi) << 32 |
+ le32_to_cpu(tmr_offset_lo));
+ } else if (!vram_size) {
+ /* fall back to apci approach to query tmr offset if vram_size is 0 */
+ ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
+ if (ret)
+ return ret;
+ adev->discovery.size = (u32)tmr_size;
+ adev->discovery.offset = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
+ }
+ }
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->discovery.size, false);
+ adev->discovery.bin = kzalloc(adev->discovery.size, GFP_KERNEL);
+ if (!adev->discovery.bin)
+ return -ENOMEM;
+ adev->discovery.debugfs_blob.data = adev->discovery.bin;
+ adev->discovery.debugfs_blob.size = adev->discovery.size;
+
+ return 0;
+}
+
+static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+{
+ void *discv_regn;
+
+ /* This region is read-only and reserved from system use */
+ discv_regn = memremap(adev->discovery.offset, adev->discovery.size, MEMREMAP_WC);
+ if (discv_regn) {
+ memcpy(binary, discv_regn, adev->discovery.size);
+ memunmap(discv_regn);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+#define IP_DISCOVERY_V2 2
+#define IP_DISCOVERY_V4 4
+
+static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary,
+ bool is_tmr_in_sysmem)
+{
+ int ret = 0;
+
+ if (!is_tmr_in_sysmem) {
+ if (amdgpu_sriov_vf(adev) &&
+ amdgpu_sriov_xgmi_connected_to_cpu(adev)) {
+ ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
+ } else {
+ amdgpu_device_vram_access(adev, adev->discovery.offset,
+ (uint32_t *)binary,
+ adev->discovery.size, false);
adev->discovery.reserve_tmr = true;
}
} else {
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
- if (ret)
- dev_err(adev->dev,
- "failed to read discovery info from memory, vram size read: %llx",
- vram_size);
-exit:
return ret;
}
static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
- uint8_t *binary,
- const char *fw_name)
+ uint8_t *binary,
+ const char *fw_name)
{
const struct firmware *fw;
int r;
@@ -431,14 +457,12 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
}
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
- struct binary_header *bhdr)
+ struct table_info *info)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct table_info *info;
uint16_t checksum;
uint16_t offset;
- info = &bhdr->table_list[NPS_INFO];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
@@ -491,23 +515,125 @@ static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
}
}
-static int amdgpu_discovery_init(struct amdgpu_device *adev)
+static int amdgpu_discovery_get_table_info(struct amdgpu_device *adev,
+ struct table_info **info,
+ uint16_t table_id)
+{
+ struct binary_header *bhdr =
+ (struct binary_header *)adev->discovery.bin;
+ struct binary_header_v2 *bhdrv2;
+
+ switch (bhdr->version_major) {
+ case 2:
+ bhdrv2 = (struct binary_header_v2 *)adev->discovery.bin;
+ *info = &bhdrv2->table_list[table_id];
+ break;
+ case 1:
+ *info = &bhdr->table_list[table_id];
+ break;
+ default:
+ dev_err(adev->dev, "Invalid ip discovery table version\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_table_check(struct amdgpu_device *adev,
+ uint8_t *discovery_bin,
+ uint16_t table_id)
{
+ int r, act_val, exp_val, table_size;
+ uint16_t offset, checksum;
struct table_info *info;
+ bool check_table = true;
+ char *table_name;
+
+ r = amdgpu_discovery_get_table_info(adev, &info, table_id);
+ if (r)
+ return r;
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ switch (table_id) {
+ case IP_DISCOVERY:
+ struct ip_discovery_header *ihdr =
+ (struct ip_discovery_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(ihdr->signature);
+ exp_val = DISCOVERY_TABLE_SIGNATURE;
+ table_size = le16_to_cpu(ihdr->size);
+ table_name = "data table";
+ break;
+ case GC:
+ struct gpu_info_header *ghdr =
+ (struct gpu_info_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(ghdr->table_id);
+ exp_val = GC_TABLE_ID;
+ table_size = le16_to_cpu(ghdr->size);
+ table_name = "gc table";
+ break;
+ case HARVEST_INFO:
+ struct harvest_info_header *hhdr =
+ (struct harvest_info_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(hhdr->signature);
+ exp_val = HARVEST_TABLE_SIGNATURE;
+ table_size = sizeof(struct harvest_table);
+ table_name = "harvest table";
+ break;
+ case VCN_INFO:
+ struct vcn_info_header *vhdr =
+ (struct vcn_info_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(vhdr->table_id);
+ exp_val = VCN_INFO_TABLE_ID;
+ table_size = le32_to_cpu(vhdr->size_bytes);
+ table_name = "vcn table";
+ break;
+ case MALL_INFO:
+ struct mall_info_header *mhdr =
+ (struct mall_info_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(mhdr->table_id);
+ exp_val = MALL_INFO_TABLE_ID;
+ table_size = le32_to_cpu(mhdr->size_bytes);
+ table_name = "mall table";
+ check_table = false;
+ break;
+ default:
+ dev_err(adev->dev, "invalid ip discovery table id %d specified\n", table_id);
+ check_table = false;
+ break;
+ }
+
+ if (check_table && offset) {
+ if (act_val != exp_val) {
+ dev_err(adev->dev, "invalid ip discovery %s signature\n", table_name);
+ return -EINVAL;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
+ table_size, checksum)) {
+ dev_err(adev->dev, "invalid ip discovery %s checksum\n", table_name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
+{
struct binary_header *bhdr;
uint8_t *discovery_bin;
const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
+ uint16_t table_id;
+ bool is_tmr_in_sysmem;
int r;
- adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
- if (!adev->discovery.bin)
- return -ENOMEM;
- adev->discovery.size = DISCOVERY_TMR_SIZE;
- adev->discovery.debugfs_blob.data = adev->discovery.bin;
- adev->discovery.debugfs_blob.size = adev->discovery.size;
+ r = amdgpu_discovery_get_tmr_info(adev, &is_tmr_in_sysmem);
+ if (r)
+ return r;
discovery_bin = adev->discovery.bin;
/* Read from file if it is the preferred option */
@@ -520,7 +646,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
} else {
drm_dbg(&adev->ddev, "use ip discovery information from memory");
- r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
+ r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin,
+ is_tmr_in_sysmem);
if (r)
goto out;
}
@@ -547,118 +674,10 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- info = &bhdr->table_list[IP_DISCOVERY];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (offset) {
- struct ip_discovery_header *ihdr =
- (struct ip_discovery_header *)(discovery_bin + offset);
- if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
- dev_err(adev->dev, "invalid ip discovery data table signature\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
- le16_to_cpu(ihdr->size),
- checksum)) {
- dev_err(adev->dev, "invalid ip discovery data table checksum\n");
- r = -EINVAL;
- goto out;
- }
- }
-
- info = &bhdr->table_list[GC];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (offset) {
- struct gpu_info_header *ghdr =
- (struct gpu_info_header *)(discovery_bin + offset);
-
- if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
- dev_err(adev->dev, "invalid ip discovery gc table id\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
- le32_to_cpu(ghdr->size),
- checksum)) {
- dev_err(adev->dev, "invalid gc data table checksum\n");
- r = -EINVAL;
- goto out;
- }
- }
-
- info = &bhdr->table_list[HARVEST_INFO];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (offset) {
- struct harvest_info_header *hhdr =
- (struct harvest_info_header *)(discovery_bin + offset);
-
- if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
- dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev,
- discovery_bin + offset,
- sizeof(struct harvest_table), checksum)) {
- dev_err(adev->dev, "invalid harvest data table checksum\n");
- r = -EINVAL;
- goto out;
- }
- }
-
- info = &bhdr->table_list[VCN_INFO];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (offset) {
- struct vcn_info_header *vhdr =
- (struct vcn_info_header *)(discovery_bin + offset);
-
- if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
- dev_err(adev->dev, "invalid ip discovery vcn table id\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev,
- discovery_bin + offset,
- le32_to_cpu(vhdr->size_bytes), checksum)) {
- dev_err(adev->dev, "invalid vcn data table checksum\n");
- r = -EINVAL;
- goto out;
- }
- }
-
- info = &bhdr->table_list[MALL_INFO];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (0 && offset) {
- struct mall_info_header *mhdr =
- (struct mall_info_header *)(discovery_bin + offset);
-
- if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
- dev_err(adev->dev, "invalid ip discovery mall table id\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev,
- discovery_bin + offset,
- le32_to_cpu(mhdr->size_bytes), checksum)) {
- dev_err(adev->dev, "invalid mall data table checksum\n");
- r = -EINVAL;
+ for (table_id = 0; table_id <= MALL_INFO; table_id++) {
+ r = amdgpu_discovery_table_check(adev, discovery_bin, table_id);
+ if (r)
goto out;
- }
}
return 0;
@@ -770,14 +789,15 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *umc_harvest_count)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
struct harvest_table *harvest_info;
u16 offset;
int i;
- uint32_t umc_harvest_config = 0;
+ u64 umc_harvest_config = 0;
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, HARVEST_INFO))
+ return;
+ offset = le16_to_cpu(info->offset);
if (!offset) {
dev_err(adev->dev, "invalid harvest table offset\n");
@@ -830,7 +850,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
}
}
- adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ adev->umc.active_mask = ((1ULL << adev->umc.node_inst_num) - 1ULL) &
~umc_harvest_config;
}
@@ -1195,13 +1215,8 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
ip_hw_instance->num_instance);
ip_hw_instance->num_base_addresses = ip->num_base_address;
- for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
- if (reg_base_64)
- ip_hw_instance->base_addr[kk] =
- lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
- else
- ip_hw_instance->base_addr[kk] = ip->base_address[kk];
- }
+ for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
+ ip_hw_instance->base_addr[kk] = ip->base_address[kk];
kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
@@ -1224,7 +1239,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
struct ip_discovery_top *ip_top = adev->discovery.ip_top;
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
struct kset *die_kset = &ip_top->die_kset;
@@ -1232,10 +1247,12 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
size_t ip_offset;
int ii, res;
- bhdr = (struct binary_header *)discovery_bin;
+ res = amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY);
+ if (res)
+ return res;
ihdr = (struct ip_discovery_header
*)(discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ le16_to_cpu(info->offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1379,12 +1396,54 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
kobject_put(&ip_top->kobj);
}
+/* devcoredump support */
+void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p)
+{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
+ struct ip_die_entry *ip_die_entry;
+ struct list_head *el_die, *el_hw_id, *el_hw_inst;
+ struct ip_hw_id *hw_id;
+ struct kset *die_kset;
+ struct ip_hw_instance *ip_inst;
+ int i = 0, j;
+
+ die_kset = &ip_top->die_kset;
+
+ drm_printf(p, "\nHW IP Discovery\n");
+ spin_lock(&die_kset->list_lock);
+ list_for_each(el_die, &die_kset->list) {
+ drm_printf(p, "die %d\n", i++);
+ ip_die_entry = to_ip_die_entry(list_to_kobj(el_die));
+
+ list_for_each(el_hw_id, &ip_die_entry->ip_kset.list) {
+ hw_id = to_ip_hw_id(list_to_kobj(el_hw_id));
+ drm_printf(p, "hw_id %d %s\n", hw_id->hw_id, hw_id_names[hw_id->hw_id]);
+
+ list_for_each(el_hw_inst, &hw_id->hw_id_kset.list) {
+ ip_inst = to_ip_hw_instance(list_to_kobj(el_hw_inst));
+ drm_printf(p, "\tinstance %d\n", ip_inst->num_instance);
+ drm_printf(p, "\tmajor %d\n", ip_inst->major);
+ drm_printf(p, "\tminor %d\n", ip_inst->minor);
+ drm_printf(p, "\trevision %d\n", ip_inst->revision);
+ drm_printf(p, "\tharvest 0x%01X\n", ip_inst->harvest);
+ drm_printf(p, "\tnum_base_addresses %d\n",
+ ip_inst->num_base_addresses);
+ for (j = 0; j < ip_inst->num_base_addresses; j++)
+ drm_printf(p, "\tbase_addr[%d] 0x%08X\n",
+ j, ip_inst->base_addr[j]);
+ }
+ }
+ }
+ spin_unlock(&die_kset->list_lock);
+}
+
+
/* ================================================== */
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
uint8_t num_base_address, subrev, variant;
- struct binary_header *bhdr;
+ struct table_info *info;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
uint8_t *discovery_bin;
@@ -1409,10 +1468,12 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
adev->jpeg.inst_mask = 0;
- bhdr = (struct binary_header *)discovery_bin;
+ r = amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY);
+ if (r)
+ return r;
ihdr = (struct ip_discovery_header
*)(discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ le16_to_cpu(info->offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1585,14 +1646,15 @@ static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
uint8_t *discovery_bin = adev->discovery.bin;
struct ip_discovery_header *ihdr;
- struct binary_header *bhdr;
+ struct table_info *info;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
- uint16_t offset, ihdr_ver;
+ uint16_t ihdr_ver;
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
- ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY))
+ return;
+ ihdr = (struct ip_discovery_header *)(discovery_bin +
+ le16_to_cpu(info->offset));
ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
@@ -1640,7 +1702,7 @@ union gc_info {
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
union gc_info *gc_info;
u16 offset;
@@ -1649,8 +1711,9 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[GC].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, GC))
+ return -EINVAL;
+ offset = le16_to_cpu(info->offset);
if (!offset)
return 0;
@@ -1749,7 +1812,7 @@ union mall_info {
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
@@ -1760,8 +1823,9 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, MALL_INFO))
+ return -EINVAL;
+ offset = le16_to_cpu(info->offset);
if (!offset)
return 0;
@@ -1806,7 +1870,7 @@ union vcn_info {
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
union vcn_info *vcn_info;
u16 offset;
int v;
@@ -1826,8 +1890,9 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, VCN_INFO))
+ return -EINVAL;
+ offset = le16_to_cpu(info->offset);
if (!offset)
return 0;
@@ -1864,14 +1929,26 @@ static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
uint64_t vram_size, pos, offset;
struct nps_info_header *nhdr;
struct binary_header bhdr;
+ struct binary_header_v2 bhdrv2;
uint16_t checksum;
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
- offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
- checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
+ switch (bhdr.version_major) {
+ case 2:
+ amdgpu_device_vram_access(adev, pos, &bhdrv2, sizeof(bhdrv2), false);
+ offset = le16_to_cpu(bhdrv2.table_list[NPS_INFO].offset);
+ checksum = le16_to_cpu(bhdrv2.table_list[NPS_INFO].checksum);
+ break;
+ case 1:
+ offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
+ checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
+ break;
+ default:
+ return -EINVAL;
+ }
amdgpu_device_vram_access(adev, (pos + offset), nps_data,
sizeof(*nps_data), false);
@@ -1894,7 +1971,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
{
uint8_t *discovery_bin = adev->discovery.bin;
struct amdgpu_gmc_memrange *mem_ranges;
- struct binary_header *bhdr;
+ struct table_info *info;
union nps_info *nps_info;
union nps_info nps_data;
u16 offset;
@@ -1915,14 +1992,15 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
return -EINVAL;
}
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, NPS_INFO))
+ return -EINVAL;
+ offset = le16_to_cpu(info->offset);
if (!offset)
return -ENOENT;
/* If verification fails, return as if NPS table doesn't exist */
- if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
+ if (amdgpu_discovery_verify_npsinfo(adev, info))
return -ENOENT;
nps_info = (union nps_info *)(discovery_bin + offset);
@@ -3226,6 +3304,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(7, 0, 1):
adev->lsdma.funcs = &lsdma_v7_0_funcs;
break;
+ case IP_VERSION(7, 1, 0):
+ adev->lsdma.funcs = &lsdma_v7_1_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 4ce04486cc31..a7aeb47887a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -30,10 +30,12 @@
#define DISCOVERY_TMR_OFFSET (64 << 10)
struct ip_discovery_top;
+struct drm_printer;
struct amdgpu_discovery_info {
struct debugfs_blob_wrapper debugfs_blob;
struct ip_discovery_top *ip_top;
+ uint64_t offset;
uint32_t size;
uint8_t *bin;
bool reserve_tmr;
@@ -47,4 +49,6 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
struct amdgpu_gmc_memrange **ranges,
int *range_cnt, bool refresh);
+void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p);
+
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index bef9dce2e7ea..f5cd68542442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1738,21 +1738,6 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
stime, etime, mode);
}
-static bool
-amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_fb_helper *fb_helper = dev->fb_helper;
-
- if (!fb_helper || !fb_helper->buffer)
- return false;
-
- if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
- return false;
-
- return true;
-}
-
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
@@ -1775,7 +1760,6 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct amdgpu_bo *robj;
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
@@ -1790,8 +1774,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
if (!fb || !fb->obj[0])
continue;
- robj = gem_to_amdgpu_bo(fb->obj[0]);
- if (!amdgpu_display_robj_is_fb(adev, robj)) {
+ if (!drm_fb_helper_gem_is_fb(dev->fb_helper, fb->obj[0])) {
+ struct amdgpu_bo *robj = gem_to_amdgpu_bo(fb->obj[0]);
+
r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 0e8a52d96573..b9fdc3276e81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -94,6 +94,10 @@ enum amdgpu_memory_partition {
#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20
#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10
+#define AMDGPU_GMC121_FAULT_SOURCE_DATA_READ 0x400000
+#define AMDGPU_GMC121_FAULT_SOURCE_DATA_WRITE 0x200000
+#define AMDGPU_GMC121_FAULT_SOURCE_DATA_EXE 0x100000
+
/*
* GMC page fault information
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 77e2133de5cf..7f19554b9ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -83,7 +83,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- if (adev == NULL)
+ if (adev == NULL || !adev->num_ip_blocks)
return;
amdgpu_unregister_gpu_instance(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index bcf2a067dc41..f80e3aca9c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -159,9 +159,9 @@ struct amdgpu_mes {
int hung_queue_db_array_size;
int hung_queue_hqd_info_offset;
- struct amdgpu_bo *hung_queue_db_array_gpu_obj[AMDGPU_MAX_MES_PIPES];
- uint64_t hung_queue_db_array_gpu_addr[AMDGPU_MAX_MES_PIPES];
- void *hung_queue_db_array_cpu_addr[AMDGPU_MAX_MES_PIPES];
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t hung_queue_db_array_gpu_addr[AMDGPU_MAX_MES_INST_PIPES];
+ void *hung_queue_db_array_cpu_addr[AMDGPU_MAX_MES_INST_PIPES];
/* cooperative dispatch */
bool enable_coop_mode;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index dc8d2f52c7d6..90352284c5ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -368,15 +368,15 @@ struct amdgpu_mode_info {
struct drm_property *plane_ctm_property;
/**
- * @shaper_lut_property: Plane property to set pre-blending shaper LUT
- * that converts color content before 3D LUT. If
- * plane_shaper_tf_property != Identity TF, AMD color module will
+ * @plane_shaper_lut_property: Plane property to set pre-blending
+ * shaper LUT that converts color content before 3D LUT.
+ * If plane_shaper_tf_property != Identity TF, AMD color module will
* combine the user LUT values with pre-defined TF into the LUT
* parameters to be programmed.
*/
struct drm_property *plane_shaper_lut_property;
/**
- * @shaper_lut_size_property: Plane property for the size of
+ * @plane_shaper_lut_size_property: Plane property for the size of
* pre-blending shaper LUT as supported by the driver (read-only).
*/
struct drm_property *plane_shaper_lut_size_property;
@@ -400,10 +400,10 @@ struct amdgpu_mode_info {
*/
struct drm_property *plane_lut3d_property;
/**
- * @plane_degamma_lut_size_property: Plane property to define the max
- * size of 3D LUT as supported by the driver (read-only). The max size
- * is the max size of one dimension and, therefore, the max number of
- * entries for 3D LUT array is the 3D LUT size cubed;
+ * @plane_lut3d_size_property: Plane property to define the max size
+ * of 3D LUT as supported by the driver (read-only). The max size is
+ * the max size of one dimension and, therefore, the max number of
+ * entries for 3D LUT array is the 3D LUT size cubed.
*/
struct drm_property *plane_lut3d_size_property;
/**
@@ -624,7 +624,7 @@ struct amdgpu_connector {
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
- struct edid *edid;
+ const struct drm_edid *edid;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 27b67da9fdac..d39b695cd925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -3096,6 +3096,13 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
*/
continue;
+ /* IMU ucode is part of IFWI and MP0 15.0.8 would load it */
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
+ IP_VERSION(15, 0, 8) &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_IMU_I ||
+ ucode->ucode_id == AMDGPU_UCODE_ID_IMU_D))
+ continue;
+
psp_print_fw_hdr(psp, ucode);
ret = psp_execute_ip_fw_load(psp, ucode);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 1ab61e7b35db..82333aeb4453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -291,22 +291,22 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
break;
case 5:
/* rlc_hdr v2_5 */
- DRM_INFO("rlc_iram_ucode_size_bytes: %u\n",
+ DRM_DEBUG("rlc_iram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->v2_2.rlc_iram_ucode_size_bytes));
- DRM_INFO("rlc_iram_ucode_offset_bytes: %u\n",
+ DRM_DEBUG("rlc_iram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->v2_2.rlc_iram_ucode_offset_bytes));
- DRM_INFO("rlc_dram_ucode_size_bytes: %u\n",
+ DRM_DEBUG("rlc_dram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->v2_2.rlc_dram_ucode_size_bytes));
- DRM_INFO("rlc_dram_ucode_offset_bytes: %u\n",
+ DRM_DEBUG("rlc_dram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->v2_2.rlc_dram_ucode_offset_bytes));
/* rlc_hdr v2_5 */
- DRM_INFO("rlc_1_iram_ucode_size_bytes: %u\n",
+ DRM_DEBUG("rlc_1_iram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->rlc_1_iram_ucode_size_bytes));
- DRM_INFO("rlc_1_iram_ucode_offset_bytes: %u\n",
+ DRM_DEBUG("rlc_1_iram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->rlc_1_iram_ucode_offset_bytes));
- DRM_INFO("rlc_1_dram_ucode_size_bytes: %u\n",
+ DRM_DEBUG("rlc_1_dram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->rlc_1_dram_ucode_size_bytes));
- DRM_INFO("rlc_1_dram_ucode_offset_bytes: %u\n",
+ DRM_DEBUG("rlc_1_dram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->rlc_1_dram_ucode_offset_bytes));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 7c450350847d..6d964a6ee349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -709,46 +709,6 @@ static int amdgpu_userq_priority_permit(struct drm_file *filp,
return -EACCES;
}
-#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
-{
- struct amdgpu_usermode_queue *queue = m->private;
- struct amdgpu_bo *bo;
- int r;
-
- if (!queue || !queue->mqd.obj)
- return -EINVAL;
-
- bo = amdgpu_bo_ref(queue->mqd.obj);
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- amdgpu_bo_unref(&bo);
- return -EINVAL;
- }
-
- seq_printf(m, "queue_type: %d\n", queue->queue_type);
- seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
-
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
-
- return 0;
-}
-
-static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
-{
- return single_open(file, amdgpu_mqd_info_read, inode->i_private);
-}
-
-static const struct file_operations amdgpu_mqd_info_fops = {
- .owner = THIS_MODULE,
- .open = amdgpu_mqd_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
@@ -758,7 +718,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
const struct amdgpu_userq_funcs *uq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_db_info db_info;
- char *queue_name;
bool skip_map_queue;
u32 qid;
uint64_t index;
@@ -819,17 +778,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
r = -EINVAL;
- kfree(queue);
- goto unlock;
+ goto free_queue;
}
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
- kfree(queue);
r = -EINVAL;
- goto unlock;
+ goto free_queue;
}
queue->doorbell_index = index;
@@ -837,42 +794,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_fence_driver_alloc(adev, queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
- goto unlock;
+ goto free_queue;
}
r = uq_funcs->mqd_create(queue, &args->in);
if (r) {
drm_file_err(uq_mgr->file, "Failed to create Queue\n");
- amdgpu_userq_fence_driver_free(queue);
- kfree(queue);
- goto unlock;
- }
-
- /* drop this refcount during queue destroy */
- kref_init(&queue->refcount);
-
- /* Wait for mode-1 reset to complete */
- down_read(&adev->reset_domain->sem);
- r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
- if (r) {
- kfree(queue);
- up_read(&adev->reset_domain->sem);
- goto unlock;
+ goto clean_fence_driver;
}
- r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
- XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
- if (r) {
- drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
- amdgpu_userq_fence_driver_free(queue);
- uq_funcs->mqd_destroy(queue);
- kfree(queue);
- r = -ENOMEM;
- up_read(&adev->reset_domain->sem);
- goto unlock;
- }
- up_read(&adev->reset_domain->sem);
-
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
((queue->queue_type == AMDGPU_HW_IP_GFX) ||
@@ -884,31 +814,52 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
- xa_erase(&uq_mgr->userq_xa, qid);
- amdgpu_userq_fence_driver_free(queue);
- uq_funcs->mqd_destroy(queue);
- kfree(queue);
- goto unlock;
+ down_read(&adev->reset_domain->sem);
+ goto clean_mqd;
}
}
- queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
- if (!queue_name) {
+ /* drop this refcount during queue destroy */
+ kref_init(&queue->refcount);
+
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+
+ r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
+ XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+ if (r) {
+ if (!skip_map_queue)
+ amdgpu_userq_unmap_helper(queue);
+
r = -ENOMEM;
- goto unlock;
+ goto clean_mqd;
}
-#if defined(CONFIG_DEBUG_FS)
- /* Queue dentry per client to hold MQD information */
- queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
- debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
-#endif
+ r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
+ if (r) {
+ xa_erase(&uq_mgr->userq_xa, qid);
+ if (!skip_map_queue)
+ amdgpu_userq_unmap_helper(queue);
+
+ goto clean_mqd;
+ }
+ up_read(&adev->reset_domain->sem);
+
+ amdgpu_debugfs_userq_init(filp, queue, qid);
amdgpu_userq_init_hang_detect_work(queue);
- kfree(queue_name);
args->out.queue_id = qid;
atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return 0;
+clean_mqd:
+ uq_funcs->mqd_destroy(queue);
+ up_read(&adev->reset_domain->sem);
+clean_fence_driver:
+ amdgpu_userq_fence_driver_free(queue);
+free_queue:
+ kfree(queue);
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
@@ -1089,12 +1040,12 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
struct amdgpu_bo *bo;
int ret;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated,
struct amdgpu_bo_va,
base.vm_status);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
bo = bo_va->base.bo;
ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
@@ -1111,9 +1062,9 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
if (ret)
return ret;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 886fbce0bfd1..9da0c6e9b869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -162,6 +162,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
AMDGIM_FEATURE_RAS_CPER = (1 << 11),
AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12),
+ AMDGIM_FEATURE_XGMI_CONNECTED_TO_CPU = (1 << 13),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -412,6 +413,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK)
+#define amdgpu_sriov_xgmi_connected_to_cpu(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_CONNECTED_TO_CPU)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index dcd49b0fb6e0..76248a0276ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -153,12 +153,10 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
vm_bo->moved = true;
amdgpu_vm_assert_locked(vm);
- spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@@ -171,9 +169,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -187,9 +183,7 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
- spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@@ -203,9 +197,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->status_lock);
+ spin_lock(&vm_bo->vm->invalidated_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
- spin_unlock(&vm_bo->vm->status_lock);
+ spin_unlock(&vm_bo->vm->invalidated_lock);
}
/**
@@ -218,10 +212,9 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
vm_bo->moved = true;
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -235,13 +228,10 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- if (vm_bo->bo->parent) {
- spin_lock(&vm_bo->vm->status_lock);
+ if (vm_bo->bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
- spin_unlock(&vm_bo->vm->status_lock);
- } else {
+ else
amdgpu_vm_bo_idle(vm_bo);
- }
}
/**
@@ -255,9 +245,7 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -271,13 +259,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
- amdgpu_vm_assert_locked(vm);
-
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
+ spin_unlock(&vm->invalidated_lock);
+ amdgpu_vm_assert_locked(vm);
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -287,14 +275,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
else if (bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
- spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_update_shared - helper to update shared memory stat
* @base: base structure for tracking BO usage in a VM
*
- * Takes the vm status_lock and updates the shared memory stat. If the basic
+ * Takes the vm stats_lock and updates the shared memory stat. If the basic
* stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called
* as well.
*/
@@ -307,7 +294,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
bool shared;
dma_resv_assert_held(bo->tbo.base.resv);
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
base->shared = shared;
@@ -319,7 +306,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
vm->stats[bo_memtype].drm.private += size;
}
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -344,11 +331,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
* be bo->tbo.resource
* @sign: if we should add (+1) or subtract (-1) from the stat
*
- * Caller need to have the vm status_lock held. Useful for when multiple update
+ * Caller needs to have the vm stats_lock held. Useful for when multiple updates
* need to happen at the same time.
*/
static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
- struct ttm_resource *res, int sign)
+ struct ttm_resource *res, int sign)
{
struct amdgpu_vm *vm = base->vm;
struct amdgpu_bo *bo = base->bo;
@@ -372,7 +359,8 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
*/
if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
vm->stats[res_memtype].drm.purgeable += size;
- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
+ if (!(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(res_memtype)))
vm->stats[bo_memtype].evicted += size;
}
}
@@ -391,9 +379,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
{
struct amdgpu_vm *vm = base->vm;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
amdgpu_vm_update_stats_locked(base, res, sign);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -419,10 +407,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@@ -481,25 +469,25 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
int ret;
/* We can only trust prev->next while holding the lock */
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
while (!list_is_head(prev->next, &vm->done)) {
bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
bo = bo_va->base.bo;
if (bo) {
amdgpu_bo_ref(bo);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
amdgpu_bo_unref(&bo);
if (unlikely(ret))
return ret;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
}
prev = prev->next;
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
return 0;
}
@@ -595,7 +583,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
- struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_vm_bo_base *bo_base, *tmp;
struct amdgpu_bo *bo;
int r;
@@ -608,13 +596,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->evicted)) {
- bo_base = list_first_entry(&vm->evicted,
- struct amdgpu_vm_bo_base,
- vm_status);
- spin_unlock(&vm->status_lock);
-
+ list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
bo = bo_base->bo;
r = validate(param, bo);
@@ -627,26 +609,21 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
- spin_lock(&vm->status_lock);
}
- while (ticket && !list_empty(&vm->evicted_user)) {
- bo_base = list_first_entry(&vm->evicted_user,
- struct amdgpu_vm_bo_base,
- vm_status);
- spin_unlock(&vm->status_lock);
- bo = bo_base->bo;
- dma_resv_assert_held(bo->tbo.base.resv);
+ if (ticket) {
+ list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user,
+ vm_status) {
+ bo = bo_base->bo;
+ dma_resv_assert_held(bo->tbo.base.resv);
- r = validate(param, bo);
- if (r)
- return r;
-
- amdgpu_vm_bo_invalidated(bo_base);
+ r = validate(param, bo);
+ if (r)
+ return r;
- spin_lock(&vm->status_lock);
+ amdgpu_vm_bo_invalidated(bo_base);
+ }
}
- spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@@ -675,9 +652,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
- spin_lock(&vm->status_lock);
ret &= list_empty(&vm->evicted);
- spin_unlock(&vm->status_lock);
spin_lock(&vm->immediate.lock);
ret &= !vm->immediate.stopped;
@@ -971,18 +946,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
- struct amdgpu_vm_bo_base *entry;
+ struct amdgpu_vm_bo_base *entry, *tmp;
bool flush_tlb_needed = false;
- LIST_HEAD(relocated);
int r, idx;
amdgpu_vm_assert_locked(vm);
- spin_lock(&vm->status_lock);
- list_splice_init(&vm->relocated, &relocated);
- spin_unlock(&vm->status_lock);
-
- if (list_empty(&relocated))
+ if (list_empty(&vm->relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -998,7 +968,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
- list_for_each_entry(entry, &relocated, vm_status) {
+ list_for_each_entry(entry, &vm->relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@@ -1014,9 +984,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
- while (!list_empty(&relocated)) {
- entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
- vm_status);
+ list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) {
amdgpu_vm_bo_idle(entry);
}
@@ -1243,9 +1211,9 @@ error_free:
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -1612,29 +1580,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo_va *bo_va, *tmp;
struct dma_resv *resv;
bool clear, unlock;
int r;
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->moved)) {
- bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
- base.vm_status);
- spin_unlock(&vm->status_lock);
-
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
/* Per VM BOs never need to bo cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- spin_lock(&vm->status_lock);
}
+ spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!adev->debug_vm && dma_resv_trylock(resv)) {
@@ -1666,9 +1629,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
return 0;
}
@@ -2211,9 +2174,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2321,10 +2284,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
amdgpu_vm_bo_invalidate(bo, evicted);
@@ -2593,11 +2556,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
+ spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->invalidated);
- spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
INIT_KFIFO(vm->faults);
+ spin_lock_init(&vm->stats_lock);
r = amdgpu_vm_init_entities(adev, vm);
if (r)
@@ -3065,7 +3029,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
amdgpu_vm_assert_locked(vm);
- spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@@ -3103,11 +3066,13 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
+ spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
+ spin_unlock(&vm->invalidated_lock);
total_invalidated_objs = id;
id = 0;
@@ -3117,7 +3082,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
- spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 46628b0e699b..87b0617d4661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -205,11 +205,11 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
- /* protected by vm status_lock */
+ /* protected by vm reservation and invalidated_lock */
struct list_head vm_status;
/* if the bo is counted as shared in mem stats
- * protected by vm status_lock */
+ * protected by vm BO being reserved */
bool shared;
/* protected by the BO being reserved */
@@ -345,10 +345,8 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;
- /* Lock to protect vm_bo add/del/move on all lists of vm */
- spinlock_t status_lock;
-
- /* Memory statistics for this vm, protected by status_lock */
+ /* Memory statistics for this vm, protected by stats_lock */
+ spinlock_t stats_lock;
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
/*
@@ -356,6 +354,8 @@ struct amdgpu_vm {
* PDs, PTs or per VM BOs. The state transits are:
*
* evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ *
+ * Lists are protected by the root PD dma_resv lock.
*/
/* Per-VM and PT BOs who needs a validation */
@@ -376,7 +376,10 @@ struct amdgpu_vm {
* state transits are:
*
* evicted_user or invalidated -> done
+ *
+ * Lists are protected by the invalidated_lock.
*/
+ spinlock_t invalidated_lock;
/* BOs for user mode queues that need a validation */
struct list_head evicted_user;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 31a437ce9570..7bdd664f0770 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -544,9 +544,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
- spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
- spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}
@@ -590,7 +588,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_pt_cursor seek;
struct amdgpu_vm_bo_base *entry;
- spin_lock(&params->vm->status_lock);
for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
if (entry && entry->bo)
list_move(&entry->vm_status, &params->tlb_flush_waitlist);
@@ -598,7 +595,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
/* enter start node now */
list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
- spin_unlock(&params->vm->status_lock);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index a841f342a3eb..847cfd1fd004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -161,7 +161,8 @@ union amd_sriov_msg_feature_flags {
uint32_t ras_telemetry : 1;
uint32_t ras_cper : 1;
uint32_t xgmi_ta_ext_peer_link : 1;
- uint32_t reserved : 19;
+ uint32_t xgmi_connected_to_cpu : 1;
+ uint32_t reserved : 18;
} flags;
uint32_t all;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f1052acea5ec..c8f465158e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1298,7 +1298,7 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
return;
}
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(drm_edid_raw(amdgpu_connector->edid), &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@@ -1368,7 +1368,7 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(drm_edid_raw(amdgpu_connector->edid), &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index c153a6e1e22a..58d0da5c2a74 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1265,7 +1265,7 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(drm_edid_raw(amdgpu_connector->edid), &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@@ -1346,7 +1346,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(drm_edid_raw(amdgpu_connector->edid), &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a85a9e32fde4..6d19f6d94d25 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1271,7 +1271,7 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(drm_edid_raw(amdgpu_connector->edid), &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@@ -1339,7 +1339,7 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(drm_edid_raw(amdgpu_connector->edid), &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
index 557d15b90ad2..4e02b62cdbb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
@@ -1155,11 +1155,13 @@ static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
- /* recalculate compute rings to use based on hardware configuration */
- num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
- adev->gfx.mec.num_queue_per_pipe) / 2;
- adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
- num_compute_rings);
+ if (adev->gfx.num_compute_rings) {
+ /* recalculate compute rings to use based on hardware configuration */
+ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe) / 2;
+ adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
+ num_compute_rings);
+ }
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
@@ -2794,6 +2796,33 @@ static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
}
+static int gfx_v12_1_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->gfx.disable_kq) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -2801,6 +2830,7 @@ static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ gfx_v12_1_set_userq_eop_interrupts(adev, false);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
@@ -2868,10 +2898,26 @@ static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+
+ switch (amdgpu_user_queue) {
+ case -1:
+ default:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = true;
+ break;
+ case 0:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq)
+ adev->gfx.num_compute_rings = 0;
+ else
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
gfx_v12_1_set_kiq_pm4_funcs(adev);
gfx_v12_1_set_ring_funcs(adev);
@@ -2898,6 +2944,10 @@ static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = gfx_v12_1_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -3630,12 +3680,6 @@ static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
return -EINVAL;
switch (me_id) {
- case 0:
- if (pipe_id == 0)
- amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
- else
- amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
- break;
case 1:
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
@@ -3652,6 +3696,9 @@ static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
amdgpu_fence_process(ring);
}
break;
+ default:
+ dev_dbg(adev->dev, "Unexpected me %d in eop_irq\n", me_id);
+ break;
}
}
@@ -3719,29 +3766,23 @@ static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
if (xcc_id == -EINVAL)
return;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring
[i +
xcc_id * adev->gfx.num_compute_rings];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ dev_dbg(adev->dev, "Unexpected me %d in priv_fault\n", me_id);
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7e9d753f4a80..95be105671ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2355,7 +2355,7 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
ring = &adev->gfx.sw_gfx_ring[i];
ring->ring_obj = NULL;
- sprintf(ring->name, amdgpu_sw_ring_name(i));
+ strscpy(ring->name, amdgpu_sw_ring_name(i), sizeof(ring->name));
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
ring->is_sw_ring = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
index dc8865c5879c..c49112d8300e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
@@ -121,7 +121,7 @@ static int gmc_v12_1_process_interrupt(struct amdgpu_device *adev,
if (entry->src_id == UTCL2_1_0__SRCID__RETRY) {
retry_fault = true;
- write_fault = !!(entry->src_data[1] & 0x200000);
+ write_fault = !!(entry->src_data[1] & AMDGPU_GMC121_FAULT_SOURCE_DATA_WRITE);
}
if (entry->client_id == SOC_V1_0_IH_CLIENTID_VMC) {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 451828bf583e..1fbe904f4223 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -289,6 +289,13 @@ static uint32_t ih_v7_0_setup_retry_doorbell(u32 doorbell_index)
return val;
}
+#define regIH_RING1_CLIENT_CFG_INDEX_V7_1 0x122
+#define regIH_RING1_CLIENT_CFG_INDEX_V7_1_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_DATA_V7_1 0x123
+#define regIH_RING1_CLIENT_CFG_DATA_V7_1_BASE_IDX 0
+#define regIH_CHICKEN_V7_1 0x129
+#define regIH_CHICKEN_V7_1_BASE_IDX 0
+
/**
* ih_v7_0_irq_init - init and enable the interrupt ring
*
@@ -307,6 +314,7 @@ static int ih_v7_0_irq_init(struct amdgpu_device *adev)
u32 tmp;
int ret;
int i;
+ u32 reg_addr;
/* disable irqs */
ret = ih_v7_0_toggle_interrupts(adev, false);
@@ -318,10 +326,15 @@ static int ih_v7_0_irq_init(struct amdgpu_device *adev)
if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
if (ih[0]->use_bus_addr) {
- ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
+ if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(7, 1, 0))
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_CHICKEN_V7_1);
+ else
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_CHICKEN);
+ ih_chicken = RREG32(reg_addr);
+ /* The reg field definitions are identical in ih v7_0 and ih v7_1 */
ih_chicken = REG_SET_FIELD(ih_chicken,
IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
+ WREG32(reg_addr, ih_chicken);
}
}
@@ -358,17 +371,26 @@ static int ih_v7_0_irq_init(struct amdgpu_device *adev)
/* Redirect the interrupts to IH RB1 for dGPU */
if (adev->irq.ih1.ring_size) {
- tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+ if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(7, 1, 0))
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX_V7_1);
+ else
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+ tmp = RREG32(reg_addr);
+ /* The reg field definitions are identical in ih v7_0 and ih v7_1 */
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
- WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
+ WREG32(reg_addr, tmp);
- tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+ if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(7, 1, 0))
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA_V7_1);
+ else
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+ tmp = RREG32(reg_addr);
+ /* The reg field definitions are identical in ih v7_0 and ih v7_1 */
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
SOURCE_ID_MATCH_ENABLE, 0x1);
-
- WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
+ WREG32(reg_addr, tmp);
}
pci_set_master(adev->pdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.c b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.c
new file mode 100644
index 000000000000..d93a0e65ce7d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include "amdgpu.h"
+#include "lsdma_v7_1.h"
+#include "amdgpu_lsdma.h"
+
+#include "lsdma/lsdma_7_1_0_offset.h"
+#include "lsdma/lsdma_7_1_0_sh_mask.h"
+
+static int lsdma_v7_1_wait_pio_status(struct amdgpu_device *adev)
+{
+ return amdgpu_lsdma_wait_for(adev, SOC15_REG_OFFSET(LSDMA, 0, regLSDMA_PIO_STATUS),
+ LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK,
+ LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK);
+}
+
+static int lsdma_v7_1_copy_mem(struct amdgpu_device *adev,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint64_t size)
+{
+ int ret;
+ uint32_t tmp;
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_LO, lower_32_bits(src_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_HI, upper_32_bits(src_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, COUNT, size);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, RAW_WAIT, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 0);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
+
+ ret = lsdma_v7_1_wait_pio_status(adev);
+ if (ret)
+ dev_err(adev->dev, "LSDMA PIO failed to copy memory!\n");
+
+ return ret;
+}
+
+static int lsdma_v7_1_fill_mem(struct amdgpu_device *adev,
+ uint64_t dst_addr,
+ uint32_t data,
+ uint64_t size)
+{
+ int ret;
+ uint32_t tmp;
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONSTFILL_DATA, data);
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, COUNT, size);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, RAW_WAIT, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 1);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
+
+ ret = lsdma_v7_1_wait_pio_status(adev);
+ if (ret)
+ dev_err(adev->dev, "LSDMA PIO failed to fill memory!\n");
+
+ return ret;
+}
+
+const struct amdgpu_lsdma_funcs lsdma_v7_1_funcs = {
+ .copy_mem = lsdma_v7_1_copy_mem,
+ .fill_mem = lsdma_v7_1_fill_mem,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.h b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.h
new file mode 100644
index 000000000000..3d1ab605849a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __LSDMA_V7_1_H__
+#define __LSDMA_V7_1_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_lsdma_funcs lsdma_v7_1_funcs;
+
+#endif /* __LSDMA_V7_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 5bfa5d1d0b36..023c7345ea54 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -731,6 +731,9 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
int i;
struct amdgpu_device *adev = mes->adev;
union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;
+ uint32_t mes_rev = (pipe == AMDGPU_MES_SCHED_PIPE) ?
+ (mes->sched_version & AMDGPU_MES_VERSION_MASK) :
+ (mes->kiq_version & AMDGPU_MES_VERSION_MASK);
memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
@@ -785,7 +788,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
* handling support, other queue will not use the oversubscribe timer.
* handling mode - 0: disabled; 1: basic version; 2: basic+ version
*/
- mes_set_hw_res_pkt.oversubscription_timer = 50;
+ mes_set_hw_res_pkt.oversubscription_timer = mes_rev < 0x8b ? 0 : 50;
mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;
if (amdgpu_mes_log_enable) {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
index 7b8c670d0a9e..d8e4b52bdfd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
@@ -1611,7 +1611,6 @@ static int mes_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[inst],
&adev->mes.eop_gpu_addr[inst],
NULL);
- amdgpu_ucode_release(&adev->mes.fw[inst]);
if (adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) {
amdgpu_bo_free_kernel(&adev->mes.ring[inst].mqd_obj,
@@ -1622,6 +1621,9 @@ static int mes_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
}
}
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++)
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
+
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
if (!adev->enable_uni_mes) {
amdgpu_bo_free_kernel(&adev->gfx.kiq[xcc_id].ring.mqd_obj,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index 308f32daa780..d1adf19a51c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -478,7 +478,7 @@ static int soc24_common_hw_init(struct amdgpu_ip_block *ip_block)
if (adev->nbio.funcs->remap_hdp_registers)
adev->nbio.funcs->remap_hdp_registers(adev);
- if (adev->df.funcs->hw_init)
+ if (adev->df.funcs && adev->df.funcs->hw_init)
adev->df.funcs->hw_init(adev);
/* enable the doorbell aperture */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c b/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c
index 26e7566a5479..0c7759b82fa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c
@@ -57,7 +57,7 @@ static void soc_v1_0_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.userqueue_end = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_END;
adev->doorbell_index.xcc_doorbell_range = AMDGPU_SOC_V1_0_DOORBELL_XCC_RANGE;
- adev->doorbell_index.sdma_doorbell_range = 20;
+ adev->doorbell_index.sdma_doorbell_range = 14;
for (i = 0; i < adev->sdma.num_instances; i++)
adev->doorbell_index.sdma_engine[i] =
AMDGPU_SOC_V1_0_DOORBELL_sDMA_ENGINE_START +
@@ -214,23 +214,35 @@ static bool soc_v1_0_need_full_reset(struct amdgpu_device *adev)
static bool soc_v1_0_need_reset_on_init(struct amdgpu_device *adev)
{
- u32 sol_reg;
- if (adev->flags & AMD_IS_APU)
- return false;
+ return false;
+}
- /* Check sOS sign of life register to confirm sys driver and sOS
- * are already been loaded.
- */
- sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
- if (sol_reg)
- return true;
+static enum amd_reset_method
+soc_v1_0_asic_reset_method(struct amdgpu_device *adev)
+{
+ if ((adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(15, 0, 8))) {
+ if (amdgpu_reset_method != -1)
+ dev_warn_once(adev->dev, "Reset override isn't supported, using Mode2 instead.\n");
- return false;
+ return AMD_RESET_METHOD_MODE2;
+ }
+
+ return amdgpu_reset_method;
}
static int soc_v1_0_asic_reset(struct amdgpu_device *adev)
{
+ switch (soc_v1_0_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
+ return amdgpu_dpm_mode2_reset(adev);
+ default:
+ dev_info(adev->dev, "Invalid reset method Not supported\n");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -244,6 +256,7 @@ static const struct amdgpu_asic_funcs soc_v1_0_asic_funcs = {
.need_reset_on_init = &soc_v1_0_need_reset_on_init,
.encode_ext_smn_addressing = &soc_v1_0_encode_ext_smn_addressing,
.reset = soc_v1_0_asic_reset,
+ .reset_method = &soc_v1_0_asic_reset_method,
};
static int soc_v1_0_common_early_init(struct amdgpu_ip_block *ip_block)
@@ -268,7 +281,8 @@ static int soc_v1_0_common_early_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 1, 0):
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
@@ -809,7 +823,7 @@ int soc_v1_0_init_soc_config(struct amdgpu_device *adev)
{
int ret, i;
int xcc_inst_per_aid = 4;
- uint16_t xcc_mask;
+ uint16_t xcc_mask, sdma_mask = 0;
xcc_mask = adev->gfx.xcc_mask;
adev->aid_mask = 0;
@@ -819,10 +833,12 @@ int soc_v1_0_init_soc_config(struct amdgpu_device *adev)
}
adev->sdma.num_inst_per_xcc = 2;
- adev->sdma.num_instances =
- NUM_XCC(adev->gfx.xcc_mask) * adev->sdma.num_inst_per_xcc;
- adev->sdma.sdma_mask =
- GENMASK(adev->sdma.num_instances - 1, 0);
+ for_each_inst(i, adev->gfx.xcc_mask)
+ sdma_mask |=
+ GENMASK(adev->sdma.num_inst_per_xcc - 1, 0) <<
+ (i * adev->sdma.num_inst_per_xcc);
+ adev->sdma.sdma_mask = sdma_mask;
+ adev->sdma.num_instances = NUM_XCC(adev->sdma.sdma_mask);
ret = soc_v1_0_xcp_mgr_init(adev);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 3ddf06c755b5..ab3b2e7be9bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2720,7 +2720,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
ctl_stack, ctl_stack_used_size, save_area_used_size);
}
-static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
+static int get_queue_checkpoint_info(struct device_queue_manager *dqm,
const struct queue *q,
u32 *mqd_size,
u32 *ctl_stack_size)
@@ -2728,6 +2728,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
enum KFD_MQD_TYPE mqd_type =
get_mqd_type_from_queue_type(q->properties.type);
+ int ret = 0;
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
@@ -2735,9 +2736,11 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
- mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
+ ret = mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
dqm_unlock(dqm);
+
+ return ret;
}
static int checkpoint_mqd(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ef07e44916f8..3272328da11f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -192,7 +192,7 @@ struct device_queue_manager_ops {
int (*reset_queues)(struct device_queue_manager *dqm,
uint16_t pasid);
- void (*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
+ int (*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
const struct queue *q, u32 *mqd_size,
u32 *ctl_stack_size);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 2429d278ef0e..06ca6235ff1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -102,7 +102,8 @@ struct mqd_manager {
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
- void (*get_checkpoint_info)(struct mqd_manager *mm, void *mqd, uint32_t *ctl_stack_size);
+ int (*get_checkpoint_info)(struct mqd_manager *mm, void *mqd,
+ uint32_t *ctl_stack_size);
void (*checkpoint_mqd)(struct mqd_manager *mm,
void *mqd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 19f21932a5ce..979ae94ac966 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -385,11 +385,14 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
return 0;
}
-static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
+static int get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
struct v9_mqd *m = get_mqd(mqd);
- *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
+ if (check_mul_overflow(m->cp_hqd_cntl_stack_size, NUM_XCC(mm->dev->xcc_mask), ctl_stack_size))
+ return -EINVAL;
+
+ return 0;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index f02ef2d44a07..431a20323146 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -274,10 +274,11 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
return 0;
}
-static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
+static int get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
/* Control stack is stored in user mode */
*ctl_stack_size = 0;
+ return 0;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 12e24fbf8c46..a031166f270c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -585,7 +585,7 @@ static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
ret = kobject_init_and_add(pdd->kobj_stats,
&procfs_stats_type,
p->kobj,
- stats_dir_filename);
+ "%s", stats_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/stats_%s folder failed",
@@ -632,7 +632,7 @@ static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
return;
ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
- p->kobj, counters_dir_filename);
+ p->kobj, "%s", counters_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/%s folder failed",
counters_dir_filename);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 8ea31699d38b..cc2621ae12f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -593,6 +593,7 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm,
p->queue_size)) {
pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
p->queue_address, p->queue_size);
+ amdgpu_bo_unreserve(vm->root.bo);
return -EFAULT;
}
@@ -1069,6 +1070,7 @@ int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
uint32_t *ctl_stack_size)
{
struct process_queue_node *pqn;
+ int ret;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
@@ -1081,9 +1083,14 @@ int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
return -EOPNOTSUPP;
}
- pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
+ ret = pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
pqn->q, mqd_size,
ctl_stack_size);
+ if (ret) {
+ pr_debug("amdkfd: Overflow while computing stack size for queue %d\n", qid);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dfe95c9b8746..65b256a7b6c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2473,6 +2473,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_LSDMA_BUFFER
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_CURSOR_OFFLOAD
};
int r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index f8c21da62819..d8c69fc94abb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -62,6 +62,9 @@ static const uint32_t rgb_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010
};
static const uint32_t overlay_formats[] = {
@@ -1908,7 +1911,8 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_zpos_immutable_property(plane, 255);
}
- if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+ if ((plane->type == DRM_PLANE_TYPE_PRIMARY ||
+ plane->type == DRM_PLANE_TYPE_OVERLAY) &&
plane_cap &&
(plane_cap->pixel_format_support.nv12 ||
plane_cap->pixel_format_support.p010)) {
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 242032c047ed..f947f82013c6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -794,13 +794,11 @@ static enum bp_result bios_parser_external_encoder_control(
static enum bp_result bios_parser_dac_load_detection(
struct dc_bios *dcb,
- enum engine_id engine_id,
- struct graphics_object_id ext_enc_id)
+ enum engine_id engine_id)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
struct dc_context *ctx = dcb->ctx;
struct bp_load_detection_parameters bp_params = {0};
- struct bp_external_encoder_control ext_cntl = {0};
enum bp_result bp_result = BP_RESULT_UNSUPPORTED;
uint32_t bios_0_scratch;
uint32_t device_id_mask = 0;
@@ -826,13 +824,6 @@ static enum bp_result bios_parser_dac_load_detection(
bp_params.engine_id = engine_id;
bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params);
- } else if (ext_enc_id.id) {
- if (!bp->cmd_tbl.external_encoder_control)
- return BP_RESULT_UNSUPPORTED;
-
- ext_cntl.action = EXTERNAL_ENCODER_CONTROL_DAC_LOAD_DETECT;
- ext_cntl.encoder_id = ext_enc_id;
- bp_result = bp->cmd_tbl.external_encoder_control(bp, &ext_cntl);
}
if (bp_result != BP_RESULT_OK)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 5ad66d873aad..e91636d033c7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -48,6 +48,7 @@
#include "dcn32/dcn32_clk_mgr.h"
#include "dcn35/dcn35_clk_mgr.h"
#include "dcn401/dcn401_clk_mgr.h"
+#include "dcn42/dcn42_clk_mgr.h"
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
@@ -362,6 +363,18 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return &clk_mgr->base;
}
break;
+ case AMDGPU_FAMILY_GC_11_5_4: {
+ struct clk_mgr_dcn42 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn42_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base.base;
+ }
+ break;
#endif /* CONFIG_DRM_AMD_DC_FP */
default:
ASSERT(0); /* Unknown Asic */
@@ -419,6 +432,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
case AMDGPU_FAMILY_GC_12_0_0:
dcn401_clk_mgr_destroy(clk_mgr);
break;
+ case AMDGPU_FAMILY_GC_11_5_4:
+ dcn42_clk_mgr_destroy(clk_mgr);
+ break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index b0aba3a6f13c..b48522480dfd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -547,6 +547,7 @@ void dcn3_clk_mgr_construct(
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3650000;
+
/* Convert dprefclk units from MHz to KHz */
/* Value already divided by 10, some resolution lost */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
index 55434f046fa2..97c9f0ce13e4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
@@ -31,59 +31,19 @@
#include "link_service.h"
#include "logger_types.h"
+#include "clk/clk_15_0_0_offset.h"
+#include "clk/clk_15_0_0_sh_mask.h"
+#include "dcn/dcn_4_2_0_offset.h"
+#include "dcn/dcn_4_2_0_sh_mask.h"
+
#undef DC_LOGGER
#define DC_LOGGER \
- clk_mgr->base.base.ctx->logger
-
-
-#define DCN_BASE__INST0_SEG1 0x000000C0
-
-#define regCLK8_CLK2_BYPASS_CNTL 0x4c2a
-#define regCLK8_CLK2_BYPASS_CNTL_BASE_IDX 0
-#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
-#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
-#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
-#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
-
-#define regDENTIST_DISPCLK_CNTL 0x0064
-#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
-
-// DENTIST_DISPCLK_CNTL
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
-#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
-#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
-#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
-#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
-#define mmDENTIST_DISPCLK_CNTL 0x0124
-#define mmCLK8_CLK_TICK_CNT_CONFIG_REG 0x1B851
-#define mmCLK8_CLK0_CURRENT_CNT 0x1B853
-#define mmCLK8_CLK1_CURRENT_CNT 0x1B854
-#define mmCLK8_CLK2_CURRENT_CNT 0x1B855
-#define mmCLK8_CLK3_CURRENT_CNT 0x1B856
-#define mmCLK8_CLK4_CURRENT_CNT 0x1B857
-
-
-#define mmCLK8_CLK0_BYPASS_CNTL 0x1B81A
-#define mmCLK8_CLK1_BYPASS_CNTL 0x1B822
-#define mmCLK8_CLK2_BYPASS_CNTL 0x1B82A
-#define mmCLK8_CLK3_BYPASS_CNTL 0x1B832
-#define mmCLK8_CLK4_BYPASS_CNTL 0x1B83A
-
-
-#define mmCLK8_CLK0_DS_CNTL 0x1B814
-#define mmCLK8_CLK1_DS_CNTL 0x1B81C
-#define mmCLK8_CLK2_DS_CNTL 0x1B824
-#define mmCLK8_CLK3_DS_CNTL 0x1B82C
-#define mmCLK8_CLK4_DS_CNTL 0x1B834
-
-
+ dc_logger
+#define DC_LOGGER_INIT(logger) \
+ struct dal_logger *dc_logger = logger
+#define DCN42_CLKIP_REFCLK 48000
#undef FN
#define FN(reg_name, field_name) \
@@ -92,16 +52,25 @@
#define REG(reg) \
(clk_mgr->regs->reg)
+// for DCN register access
+#define DCN_BASE__INST0_SEG0 0x00000012
+#define DCN_BASE__INST0_SEG1 0x000000C0
+
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
-#define SR(reg_name)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+#define SR(reg_name) \
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
+
+// for CLKIP register access
+#define CLK_BASE__INST0_SEG0 0x00016C00
-#define CLK_SR_DCN42(reg_name)\
- .reg_name = mm ## reg_name
+#define CLK_BASE_INNER(seg) \
+ CLK_BASE__INST0_SEG ## seg
+
+#define CLK_SR_DCN42(reg_name) \
+ .reg_name = CLK_BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
static const struct clk_mgr_registers clk_mgr_regs_dcn42 = {
CLK_REG_LIST_DCN42()
@@ -115,26 +84,21 @@ static const struct clk_mgr_mask clk_mgr_mask_dcn42 = {
CLK_COMMON_MASK_SH_LIST_DCN42(_MASK)
};
-
-
#define TO_CLK_MGR_DCN42(clk_mgr_int)\
container_of(clk_mgr_int, struct clk_mgr_dcn42, base)
-int dcn42_get_active_display_cnt_wa(
- struct dc *dc,
- struct dc_state *context,
- int *all_active_disps)
+bool dcn42_has_active_display(struct dc *dc, const struct dc_state *context)
{
- int i, display_count = 0;
- bool tmds_present = false;
+ int i, active_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
- if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
- stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
- stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
- tmds_present = true;
+ /* Check stream/link detection to ensure the PHY is active */
+ if (dc_is_hdmi_signal(stream->signal) ||
+ dc_is_dvi_signal(stream->signal) ||
+ (dc_is_dp_signal(stream->signal) && !stream->dpms_off))
+ active_count++;
}
for (i = 0; i < dc->link_count; i++) {
@@ -143,15 +107,53 @@ int dcn42_get_active_display_cnt_wa(
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc))
- display_count++;
+ active_count++;
+ }
+
+ return active_count > 0;
+}
+
+static uint32_t dcn42_get_clock_freq_from_clkip(struct clk_mgr *clk_mgr_base, enum clock_type clock)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint64_t clock_freq_mhz = 0;
+ uint32_t timer_threshold = 0;
+
+ // always read the timer threshold from the register; a cached value may be stale
+ REG_GET(CLK8_CLK_TICK_CNT_CONFIG_REG, TIMER_THRESHOLD, &timer_threshold);
+
+ if (timer_threshold == 0) {
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+
+ switch (clock) {
+ case clock_type_dispclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK0_CURRENT_CNT);
+ break;
+ case clock_type_dppclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK1_CURRENT_CNT);
+ break;
+ case clock_type_dprefclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK2_CURRENT_CNT);
+ break;
+ case clock_type_dcfclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK3_CURRENT_CNT);
+ break;
+ case clock_type_dtbclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK4_CURRENT_CNT);
+ break;
+ default:
+ break;
}
- if (all_active_disps != NULL)
- *all_active_disps = display_count;
- /* WA for hang on HDMI after display off back on*/
- if (display_count == 0 && tmds_present)
- display_count = 1;
- return display_count;
+ clock_freq_mhz *= DCN42_CLKIP_REFCLK;
+ clock_freq_mhz = div_u64(clock_freq_mhz, timer_threshold);
+
+ // there are no DCN clocks over 0xFFFFFFFF MHz
+ ASSERT(clock_freq_mhz <= 0xFFFFFFFF);
+
+ return (uint32_t)clock_freq_mhz;
}
void dcn42_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
@@ -213,22 +215,18 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
- int all_active_disps = 0;
+ bool has_active_display;
if (dc->work_arounds.skip_clock_update)
return;
- display_count = dcn42_get_active_display_cnt_wa(dc, context, &all_active_disps);
+ has_active_display = dcn42_has_active_display(dc, context);
- /*dml21 issue*/
- ASSERT(new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000); //remove this section if assert is hit
if (new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz < 590000)
new_clocks->ref_dtbclk_khz = 600000;
-
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -248,7 +246,7 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
/* if we can go lower, go lower */
- if (display_count == 0)
+ if (has_active_display == false)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
} else {
@@ -262,9 +260,7 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
dcn42_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
dcn42_smu_set_dtbclk(clk_mgr, true);
- if (clk_mgr_base->boot_snapshot.timer_threhold)
- actual_dtbclk = REG_READ(CLK8_CLK4_CURRENT_CNT) / (clk_mgr_base->boot_snapshot.timer_threhold / 48000);
-
+ actual_dtbclk = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dtbclk);
if (actual_dtbclk > 590000) {
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
@@ -308,7 +304,7 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
- (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && has_active_display == false))) {
int requested_dispclk_khz = new_clocks->dispclk_khz;
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
@@ -386,34 +382,27 @@ bool dcn42_are_clock_states_equal(struct dc_clocks *a,
static void dcn42_dump_clk_registers_internal(struct dcn42_clk_internal *internal, struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- uint32_t ratio = 1;
-
- internal->CLK8_CLK_TICK_CNT__TIMER_THRESHOLD = REG_READ(CLK8_CLK_TICK_CNT_CONFIG_REG) & 0xFFFFFF;
-
- ratio = internal->CLK8_CLK_TICK_CNT__TIMER_THRESHOLD / 48000;
- ASSERT(ratio != 0);
-
- if (ratio) {
- // read dcf deep sleep divider
- internal->CLK8_CLK0_DS_CNTL = REG_READ(CLK8_CLK0_DS_CNTL);
- internal->CLK8_CLK3_DS_CNTL = REG_READ(CLK8_CLK3_DS_CNTL);
- // read dispclk
- internal->CLK8_CLK0_CURRENT_CNT = REG_READ(CLK8_CLK0_CURRENT_CNT) / ratio;
- internal->CLK8_CLK0_BYPASS_CNTL = REG_READ(CLK8_CLK0_BYPASS_CNTL);
- // read dppclk
- internal->CLK8_CLK1_CURRENT_CNT = REG_READ(CLK8_CLK1_CURRENT_CNT) / ratio;
- internal->CLK8_CLK1_BYPASS_CNTL = REG_READ(CLK8_CLK1_BYPASS_CNTL);
- // read dprefclk
- internal->CLK8_CLK2_CURRENT_CNT = REG_READ(CLK8_CLK2_CURRENT_CNT) / ratio;
- internal->CLK8_CLK2_BYPASS_CNTL = REG_READ(CLK8_CLK2_BYPASS_CNTL);
- // read dcfclk
- internal->CLK8_CLK3_CURRENT_CNT = REG_READ(CLK8_CLK3_CURRENT_CNT) / ratio;
- internal->CLK8_CLK3_BYPASS_CNTL = REG_READ(CLK8_CLK3_BYPASS_CNTL);
- // read dtbclk
- internal->CLK8_CLK4_CURRENT_CNT = REG_READ(CLK8_CLK4_CURRENT_CNT) / ratio;
- internal->CLK8_CLK4_BYPASS_CNTL = REG_READ(CLK8_CLK4_BYPASS_CNTL);
- }
+ REG_GET(CLK8_CLK_TICK_CNT_CONFIG_REG, TIMER_THRESHOLD, &internal->CLK8_CLK_TICK_CNT__TIMER_THRESHOLD);
+
+ // read dcf deep sleep divider
+ internal->CLK8_CLK0_DS_CNTL = REG_READ(CLK8_CLK0_DS_CNTL);
+ internal->CLK8_CLK3_DS_CNTL = REG_READ(CLK8_CLK3_DS_CNTL);
+ // read dispclk
+ internal->CLK8_CLK0_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dispclk);
+ internal->CLK8_CLK0_BYPASS_CNTL = REG_READ(CLK8_CLK0_BYPASS_CNTL);
+ // read dppclk
+ internal->CLK8_CLK1_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dppclk);
+ internal->CLK8_CLK1_BYPASS_CNTL = REG_READ(CLK8_CLK1_BYPASS_CNTL);
+ // read dprefclk
+ internal->CLK8_CLK2_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dprefclk);
+ internal->CLK8_CLK2_BYPASS_CNTL = REG_READ(CLK8_CLK2_BYPASS_CNTL);
+ // read dcfclk
+ internal->CLK8_CLK3_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dcfclk);
+ internal->CLK8_CLK3_BYPASS_CNTL = REG_READ(CLK8_CLK3_BYPASS_CNTL);
+ // read dtbclk
+ internal->CLK8_CLK4_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dtbclk);
+ internal->CLK8_CLK4_BYPASS_CNTL = REG_READ(CLK8_CLK4_BYPASS_CNTL);
}
static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
@@ -422,8 +411,11 @@ static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs
struct dcn42_clk_internal internal = {0};
char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
+ DC_LOGGER_INIT(clk_mgr->base.base.ctx->logger);
+ (void)dc_logger;
+
dcn42_dump_clk_registers_internal(&internal, &clk_mgr->base.base);
- regs_and_bypass->timer_threhold = internal.CLK8_CLK_TICK_CNT__TIMER_THRESHOLD;
+ regs_and_bypass->timer_threshold = internal.CLK8_CLK_TICK_CNT__TIMER_THRESHOLD;
regs_and_bypass->dcfclk = internal.CLK8_CLK3_CURRENT_CNT / 10;
regs_and_bypass->dcf_deep_sleep_divider = internal.CLK8_CLK3_DS_CNTL / 10;
regs_and_bypass->dcf_deep_sleep_allow = internal.CLK8_CLK3_DS_CNTL & 0x10; /*bit 4: CLK0_ALLOW_DS*/
@@ -432,18 +424,10 @@ static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs
regs_and_bypass->dppclk = internal.CLK8_CLK1_CURRENT_CNT / 10;
regs_and_bypass->dtbclk = internal.CLK8_CLK4_CURRENT_CNT / 10;
- regs_and_bypass->dppclk_bypass = internal.CLK8_CLK1_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dppclk_bypass > 4)
- regs_and_bypass->dppclk_bypass = 0;
- regs_and_bypass->dcfclk_bypass = internal.CLK8_CLK3_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dcfclk_bypass > 4)
- regs_and_bypass->dcfclk_bypass = 0;
- regs_and_bypass->dispclk_bypass = internal.CLK8_CLK0_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dispclk_bypass > 4)
- regs_and_bypass->dispclk_bypass = 0;
- regs_and_bypass->dprefclk_bypass = internal.CLK8_CLK2_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dprefclk_bypass > 4)
- regs_and_bypass->dprefclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = get_reg_field_value(internal.CLK8_CLK0_BYPASS_CNTL, CLK8_CLK0_BYPASS_CNTL, CLK0_BYPASS_SEL);
+ regs_and_bypass->dppclk_bypass = get_reg_field_value(internal.CLK8_CLK1_BYPASS_CNTL, CLK8_CLK1_BYPASS_CNTL, CLK1_BYPASS_SEL);
+ regs_and_bypass->dprefclk_bypass = get_reg_field_value(internal.CLK8_CLK2_BYPASS_CNTL, CLK8_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL);
+ regs_and_bypass->dcfclk_bypass = get_reg_field_value(internal.CLK8_CLK3_BYPASS_CNTL, CLK8_CLK3_BYPASS_CNTL, CLK3_BYPASS_SEL);
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
@@ -467,7 +451,6 @@ static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs
// REGISTER VALUES
DC_LOG_SMU("reg_name,value,clk_type\n");
-
DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk\n",
internal.CLK8_CLK3_CURRENT_CNT);
@@ -588,6 +571,9 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
struct clk_mgr_dcn42 *clk_mgr = TO_CLK_MGR_DCN42(clk_mgr_int);
struct dcn42_smu_dpm_clks smu_dpm_clks = { 0 };
+ DC_LOGGER_INIT(clk_mgr_base->ctx->logger);
+ (void)dc_logger;
+
init_clk_states(clk_mgr_base);
// to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk
@@ -597,6 +583,7 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
else
clk_mgr_base->dp_dto_source_clock_in_khz = clk_mgr_base->dprefclk_khz;
+ DC_LOG_SMU("dp_dto_source_clock %d, dprefclk %d\n", clk_mgr_base->dp_dto_source_clock_in_khz, clk_mgr_base->dprefclk_khz);
dcn42_dump_clk_registers(&clk_mgr_base->boot_snapshot, clk_mgr);
clk_mgr_base->clks.ref_dtbclk_khz = clk_mgr_base->boot_snapshot.dtbclk * 10;
@@ -605,6 +592,12 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr_base->clks.dtbclk_en = true;
}
+ if (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels != 0) {
+ /*skip to get clock table and notify pmfw watermark range again*/
+ DC_LOG_SMU("skip to get dpm_clks from pmfw from resume and acr\n");
+ return;
+ }
+
smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn42 *)dm_helpers_allocate_gpu_mem(
clk_mgr_base->ctx,
DC_MEM_ALLOC_TYPE_GART,
@@ -711,10 +704,9 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
/* DTBCLK*/
clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz = clk_mgr_base->clks.ref_dtbclk_khz / 1000;
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels = 1;
-
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
- clk_mgr_base->ctx->dc, clk_mgr_base->bw_params);
+ clk_mgr_base->ctx->dc, clk_mgr_base->bw_params);
}
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
@@ -826,7 +818,6 @@ static void dcn42_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
}
}
-/* Exposed for dcn42b reuse */
void dcn42_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn42_watermarks *table)
{
int i, num_valid_sets;
@@ -885,18 +876,42 @@ void dcn42_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn42_
void dcn42_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
+ int i = 0;
+ struct dcn42_watermarks *table = NULL;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct clk_mgr_dcn42 *clk_mgr_dcn42 = TO_CLK_MGR_DCN42(clk_mgr);
- struct dcn42_watermarks *table = clk_mgr_dcn42->smu_wm_set.wm_set;
if (!clk_mgr->smu_ver)
return;
+ /*send once already skip*/
+ if (clk_mgr_base->bw_params->wm_table.entries[WM_A].valid == true)
+ return;
+ clk_mgr_dcn42->smu_wm_set.wm_set = (struct dcn42_watermarks *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.ctx,
+ DC_MEM_ALLOC_TYPE_GART,
+ sizeof(struct dcn42_watermarks),
+ &clk_mgr_dcn42->smu_wm_set.mc_address.quad_part);
+
+ ASSERT(clk_mgr_dcn42->smu_wm_set.wm_set);
+
+ table = clk_mgr_dcn42->smu_wm_set.wm_set;
if (!table || clk_mgr_dcn42->smu_wm_set.mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
+ /*same as previous asic, set wm valid before building watermark ranges*/
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ clk_mgr_base->bw_params->wm_table.entries[i].wm_inst = i;
+ if (i >= clk_mgr_base->bw_params->clk_table.num_entries) {
+ clk_mgr_base->bw_params->wm_table.entries[i].valid = false;
+ continue;
+ }
+ clk_mgr_base->bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
+ clk_mgr_base->bw_params->wm_table.entries[i].valid = true;
+ }
+ /* build watermark_range will check this valid range*/
dcn42_build_watermark_ranges(clk_mgr_base->bw_params, table);
dcn42_smu_set_dram_addr_high(clk_mgr,
@@ -904,18 +919,21 @@ void dcn42_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
dcn42_smu_set_dram_addr_low(clk_mgr,
clk_mgr_dcn42->smu_wm_set.mc_address.low_part);
dcn42_smu_transfer_wm_table_dram_2_smu(clk_mgr);
+
+ if (clk_mgr_dcn42->smu_wm_set.wm_set && clk_mgr_dcn42->smu_wm_set.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
+ clk_mgr_dcn42->smu_wm_set.wm_set);
+
}
void dcn42_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
- int display_count;
struct dc *dc = clk_mgr_base->ctx->dc;
struct dc_state *context = dc->current_state;
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn42_get_active_display_cnt_wa(dc, context, NULL);
/* if we can go lower, go lower */
- if (display_count == 0)
+ if (dcn42_has_active_display(dc, context) == false)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
@@ -1096,14 +1114,7 @@ void dcn42_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000; /*sync with pmfw*/
-
- clk_mgr->smu_wm_set.wm_set = (struct dcn42_watermarks *)dm_helpers_allocate_gpu_mem(
- clk_mgr->base.base.ctx,
- DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dcn42_watermarks),
- &clk_mgr->smu_wm_set.mc_address.quad_part);
-
- ASSERT(clk_mgr->smu_wm_set.wm_set);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
/* Changed from DCN3.2_clock_frequency doc to match
* dcn32_dump_clk_registers from 4 * dentist_vco_freq_khz /
@@ -1112,6 +1123,9 @@ void dcn42_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.smu_present = false;
+ clk_mgr->base.smu_ver = dcn42_smu_get_pmfw_version(&clk_mgr->base);
+ if (clk_mgr->base.smu_ver && clk_mgr->base.smu_ver != -1)
+ clk_mgr->base.smu_present = true;
if (ctx->dc_bios->integrated_info) {
clk_mgr->base.base.dentist_vco_freq_khz = ctx->dc_bios->integrated_info->dentist_vco_freq;
@@ -1122,7 +1136,9 @@ void dcn42_clk_mgr_construct(
dcn42_bw_params.wm_table = ddr5_wm_table;
dcn42_bw_params.vram_type = ctx->dc_bios->integrated_info->memory_type;
dcn42_bw_params.dram_channel_width_bytes = ctx->dc_bios->integrated_info->memory_type == 0x22 ? 8 : 4;
- dcn42_bw_params.num_channels = ctx->dc_bios->integrated_info->ma_channel_number ? ctx->dc_bios->integrated_info->ma_channel_number : 4;
+ dcn42_bw_params.num_channels = ctx->dc_bios->integrated_info->ma_channel_number ? ctx->dc_bios->integrated_info->ma_channel_number : 1;
+ clk_mgr->base.base.dprefclk_khz = dcn42_smu_get_dprefclk(&clk_mgr->base);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = dcn42_smu_get_dtbclk(&clk_mgr->base);
}
/* in case we don't get a value from the BIOS, use default */
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
@@ -1131,9 +1147,6 @@ void dcn42_clk_mgr_construct(
/* Saved clocks configured at boot for debug purposes */
dcn42_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
- if (clk_mgr->base.smu_present)
- clk_mgr->base.base.dprefclk_khz = dcn42_smu_get_dprefclk(&clk_mgr->base);
- clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
index 99fcdb602c62..5ad027a9edaf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
@@ -59,7 +59,6 @@ void dcn42_clk_mgr_construct(struct dc_context *ctx,
void dcn42_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
-/* Exposed for dcn42b reuse */
void dcn42_init_single_clock(unsigned int *entry_0,
uint32_t *smu_entry_0,
uint8_t num_levels);
@@ -76,4 +75,5 @@ int dcn42_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context, int
void dcn42_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, bool safe_to_lower);
void dcn42_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, int ref_dtbclk_khz);
bool dcn42_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
+bool dcn42_has_active_display(struct dc *dc, const struct dc_state *context);
#endif //__DCN42_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 615bf2a01389..31589f22aae0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2895,16 +2895,27 @@ static struct surface_update_descriptor det_surface_update(
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
- if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
+ if (u->cm || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
- if (u->lut3d_func || u->func_shaper) {
+ if (u->cm && (u->cm->flags.bits.lut3d_enable || u->surface->cm.flags.bits.lut3d_enable)) {
update_flags->bits.lut_3d = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
+ if (u->cm && u->cm->flags.bits.lut3d_dma_enable != u->surface->cm.flags.bits.lut3d_dma_enable &&
+ u->cm->flags.bits.lut3d_enable && u->surface->cm.flags.bits.lut3d_enable) {
+ /* Toggling 3DLUT loading between DMA and Host is illegal */
+ BREAK_TO_DEBUGGER();
+ }
+
+ if (u->cm && u->cm->flags.bits.lut3d_enable && !u->cm->flags.bits.lut3d_dma_enable) {
+ /* Host loading 3DLUT requires full update but only stream lock */
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STREAM);
+ }
+
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
// TODO: Should be fast?
@@ -2919,24 +2930,15 @@ static struct surface_update_descriptor det_surface_update(
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
- if (u->cm2_params) {
- if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting
- || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable
- || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) {
- update_flags->bits.mcm_transfer_function_enable_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- }
- }
-
- if (update_flags->bits.lut_3d &&
- u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ if (u->cm_hist_control) {
+ update_flags->bits.cm_hist_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
-
if (check_config->enable_legacy_fast_update &&
(update_flags->bits.gamma_change ||
update_flags->bits.gamut_remap_change ||
update_flags->bits.input_csc_change ||
+ update_flags->bits.cm_hist_change ||
update_flags->bits.coeff_reduction_change)) {
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
@@ -3168,6 +3170,11 @@ static void copy_surface_update_to_plane(
surface->gamma_correction.type =
srf_update->gamma->type;
}
+ if (srf_update->cm_hist_control) {
+ memcpy(&surface->cm_hist_control,
+ srf_update->cm_hist_control,
+ sizeof(surface->cm_hist_control));
+ }
if (srf_update->in_transfer_func) {
surface->in_transfer_func.sdr_ref_white_level =
@@ -3181,24 +3188,12 @@ static void copy_surface_update_to_plane(
sizeof(struct dc_transfer_func_distributed_points));
}
- if (srf_update->cm2_params) {
- surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
- surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
- surface->mcm_luts = srf_update->cm2_params->cm2_luts;
+ /* Shaper, 3DLUT, 1DLUT */
+ if (srf_update->cm) {
+ memcpy(&surface->cm, srf_update->cm,
+ sizeof(surface->cm));
}
- if (srf_update->func_shaper) {
- memcpy(&surface->in_shaper_func, srf_update->func_shaper,
- sizeof(surface->in_shaper_func));
-
- if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER)
- surface->mcm_luts.shaper = &surface->in_shaper_func;
- }
-
- if (srf_update->lut3d_func)
- memcpy(&surface->lut3d_func, srf_update->lut3d_func,
- sizeof(surface->lut3d_func));
-
if (srf_update->hdr_mult.value)
surface->hdr_mult =
srf_update->hdr_mult;
@@ -3207,17 +3202,6 @@ static void copy_surface_update_to_plane(
surface->sdr_white_level_nits =
srf_update->sdr_white_level_nits;
- if (srf_update->blend_tf) {
- memcpy(&surface->blend_tf, srf_update->blend_tf,
- sizeof(surface->blend_tf));
-
- if (surface->mcm_lut1d_enable)
- surface->mcm_luts.lut1d_func = &surface->blend_tf;
- }
-
- if (srf_update->cm2_params || srf_update->blend_tf)
- surface->lut_bank_a = !surface->lut_bank_a;
-
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
*srf_update->input_csc_color_matrix;
@@ -4501,11 +4485,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
- if (srf_updates[i].cm2_params &&
- srf_updates[i].cm2_params->cm2_luts.lut3d_data.lut3d_src ==
- DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM &&
- srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting ==
- DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT &&
+ if (srf_updates[i].cm &&
+ srf_updates[i].cm->flags.bits.lut3d_enable &&
+ srf_updates[i].cm->flags.bits.lut3d_dma_enable &&
dc->hwss.trigger_3dlut_dma_load)
dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx);
@@ -5073,6 +5055,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update,
fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
+ fast_update[i].cm_hist_control = srf_updates[i].cm_hist_control;
}
}
@@ -5090,6 +5073,7 @@ static bool fast_updates_exist(const struct dc_fast_update *fast_update, int sur
fast_update[i].gamut_remap_matrix ||
fast_update[i].input_csc_color_matrix ||
fast_update[i].cursor_csc_color_matrix ||
+ fast_update[i].cm_hist_control ||
fast_update[i].coeff_reduction_factor)
return true;
}
@@ -5110,6 +5094,7 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_
fast_update[i].gamma ||
fast_update[i].gamut_remap_matrix ||
fast_update[i].coeff_reduction_factor ||
+ fast_update[i].cm_hist_control ||
fast_update[i].cursor_csc_color_matrix)
return true;
}
@@ -5151,6 +5136,12 @@ static bool full_update_required(
const struct dc_stream_update *stream_update,
const struct dc_stream_state *stream)
{
+ const union dc_plane_cm_flags blend_only_flags = {
+ .bits = {
+ .blend_enable = 1,
+ }
+ };
+
if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
return true;
@@ -5163,14 +5154,12 @@ static bool full_update_required(
(srf_updates[i].sdr_white_level_nits &&
srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) ||
srf_updates[i].in_transfer_func ||
- srf_updates[i].func_shaper ||
- srf_updates[i].lut3d_func ||
srf_updates[i].surface->force_full_update ||
(srf_updates[i].flip_addr &&
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
- (srf_updates[i].cm2_params &&
- (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
- srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable))))
+ (srf_updates[i].cm &&
+ ((srf_updates[i].cm->flags.all != blend_only_flags.all && srf_updates[i].cm->flags.all != 0) ||
+ (srf_updates[i].surface->cm.flags.all != blend_only_flags.all && srf_updates[i].surface->cm.flags.all != 0)))))
return true;
}
@@ -5945,6 +5934,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
case AMDGPU_FAMILY_GC_11_0_1:
case AMDGPU_FAMILY_GC_11_5_0:
+ case AMDGPU_FAMILY_GC_11_5_4:
if (!dc->debug.dpia_debug.bits.disable_dpia)
return true;
break;
@@ -6897,7 +6887,7 @@ bool dc_capture_register_software_state(struct dc *dc, struct dc_register_softwa
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
/* MPCC blending tree and mode control - capture actual blend configuration */
- state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0;
+ state->mpc.mpcc_mode[i] = (plane_state->cm.blend_func.type != TF_TYPE_BYPASS) ? 1 : 0;
state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0;
state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0;
state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */
@@ -7295,6 +7285,23 @@ static bool update_planes_and_stream_prepare_v3(
ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID);
dc_exit_ips_for_hw_access(scratch->dc);
+ /* HWSS path determination needs to be done prior to updating the surface and stream states. */
+ struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
+
+ populate_fast_updates(fast_update,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream_update);
+
+ const bool is_hwss_fast_path_only =
+ fast_update_only(scratch->dc,
+ fast_update,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream_update,
+ scratch->stream) &&
+ !scratch->dc->check_config.enable_legacy_fast_update;
+
if (!update_planes_and_stream_state(
scratch->dc,
scratch->surface_updates,
@@ -7310,26 +7317,7 @@ static bool update_planes_and_stream_prepare_v3(
if (scratch->new_context == scratch->dc->current_state) {
ASSERT(scratch->update_type < UPDATE_TYPE_FULL);
- // TODO: Do we need this to be alive in execute?
- struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
-
- populate_fast_updates(
- fast_update,
- scratch->surface_updates,
- scratch->surface_count,
- scratch->stream_update
- );
- const bool fast = fast_update_only(
- scratch->dc,
- fast_update,
- scratch->surface_updates,
- scratch->surface_count,
- scratch->stream_update,
- scratch->stream
- )
- // TODO: Can this be used to skip `populate_fast_updates`?
- && !scratch->dc->check_config.enable_legacy_fast_update;
- scratch->flow = fast
+ scratch->flow = is_hwss_fast_path_only
? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST
: UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c9fbb64d706a..8271b12c1a66 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2431,7 +2431,6 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
bool is_primary;
- DC_LOGGER_INIT(dc->ctx->logger);
slice_count = resource_get_opp_heads_for_otg_master(otg_master,
&state->res_ctx, opp_heads);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index baf820e6eae8..908f79b02102 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,6 +33,7 @@
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
@@ -171,10 +172,12 @@ struct dc_stream_state *dc_create_stream_for_sink(
goto fail;
stream = kzalloc_obj(struct dc_stream_state, GFP_ATOMIC);
+
if (stream == NULL)
goto fail;
stream->update_scratch = kzalloc((int32_t) dc_update_scratch_space_size(), GFP_ATOMIC);
+
if (stream->update_scratch == NULL)
goto fail;
@@ -245,7 +248,6 @@ const struct dc_stream_status *dc_stream_get_status_const(
const struct dc_stream_state *stream)
{
struct dc *dc = stream->ctx->dc;
-
return dc_state_get_stream_status(dc->current_state, stream);
}
@@ -257,6 +259,7 @@ void program_cursor_attributes(
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
+ bool unlock_dmub = false;
if (!stream)
return;
@@ -275,6 +278,12 @@ void program_cursor_attributes(
if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
} else {
+ if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream &&
+ should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) {
+ dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true);
+ unlock_dmub = true;
+ }
+
dc->hwss.cursor_lock(dc, pipe_to_program, true);
if (pipe_to_program->next_odm_pipe)
dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
@@ -297,6 +306,9 @@ void program_cursor_attributes(
dc->hwss.cursor_lock(dc, pipe_to_program, false);
if (pipe_to_program->next_odm_pipe)
dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+
+ if (unlock_dmub)
+ dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false);
}
}
}
@@ -404,6 +416,7 @@ void program_cursor_position(
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
+ bool unlock_dmub = false;
if (!stream)
return;
@@ -423,10 +436,16 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
- else
+ } else {
+ if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream &&
+ should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) {
+ dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true);
+ unlock_dmub = true;
+ }
dc->hwss.cursor_lock(dc, pipe_to_program, true);
+ }
}
dc->hwss.set_cursor_position(pipe_ctx);
@@ -438,10 +457,14 @@ void program_cursor_position(
}
if (pipe_to_program) {
- if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
- else
+ } else {
dc->hwss.cursor_lock(dc, pipe_to_program, false);
+
+ if (unlock_dmub)
+ dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false);
+ }
}
}
@@ -523,8 +546,10 @@ bool dc_stream_program_cursor_position(
struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
/* trigger event on first pipe with current stream */
- if (stream == pipe_ctx->stream) {
- pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
+ if (stream == pipe_ctx->stream &&
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger) {
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger(
+ pipe_ctx->stream_res.tg);
break;
}
}
@@ -984,7 +1009,6 @@ void dc_stream_release_3dlut_for_stream(
if (rmcm_3dlut) {
rmcm_3dlut->isInUse = false;
rmcm_3dlut->stream = NULL;
- rmcm_3dlut->protection_bits = 0;
}
}
@@ -996,7 +1020,6 @@ void dc_stream_init_rmcm_3dlut(struct dc *dc)
for (int i = 0; i < num_rmcm; i++) {
dc->res_pool->rmcm_3dlut[i].isInUse = false;
dc->res_pool->rmcm_3dlut[i].stream = NULL;
- dc->res_pool->rmcm_3dlut[i].protection_bits = 0;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index d4c40b44d909..5f12dcca7f71 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -139,6 +139,9 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state && flags.bits.address)
pipe_ctx->plane_state->status.is_flip_pending = false;
+ if (pipe_ctx->plane_state && flags.bits.histogram)
+ memset(&pipe_ctx->plane_state->status.cm_hist, 0,
+ sizeof(pipe_ctx->plane_state->status.cm_hist));
break;
}
@@ -154,6 +157,12 @@ const struct dc_plane_status *dc_plane_get_status(
if (flags.bits.address)
dc->hwss.update_pending_status(pipe_ctx);
+ if (flags.bits.histogram) {
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
+ if (dpp && dpp->funcs->dpp_cm_hist_read)
+ dpp->funcs->dpp_cm_hist_read(dpp, &pipe_ctx->plane_state->status.cm_hist);
+ }
}
return plane_status;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4bdb7bb47c75..c7a09724f569 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -63,7 +63,7 @@ struct dcn_dsc_reg_state;
struct dcn_optc_reg_state;
struct dcn_dccg_reg_state;
-#define DC_VER "3.2.372"
+#define DC_VER "3.2.373"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -1404,15 +1404,50 @@ struct lut_mem_mapping {
struct dc_rmcm_3dlut {
bool isInUse;
const struct dc_stream_state *stream;
- uint8_t protection_bits;
};
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
- struct fixed31_32 hdr_multiplier;
union dc_3dlut_state state;
};
+
+/* 3DLUT DMA (Fast Load) params */
+struct dc_3dlut_dma {
+ struct dc_plane_address addr;
+ enum dc_cm_lut_swizzle swizzle;
+ enum dc_cm_lut_pixel_format format;
+ uint16_t bias; /* FP1.5.10 */
+ uint16_t scale; /* FP1.5.10 */
+ enum dc_cm_lut_size size;
+};
+
+/* color manager */
+union dc_plane_cm_flags {
+ unsigned int all;
+ struct {
+ unsigned int shaper_enable : 1;
+ unsigned int lut3d_enable : 1;
+ unsigned int blend_enable : 1;
+ /* whether legacy (lut3d_func) or DMA is valid */
+ unsigned int lut3d_dma_enable : 1;
+ /* RMCM lut to be used instead of MCM */
+ unsigned int rmcm_enable : 1;
+ unsigned int reserved: 27;
+ } bits;
+};
+
+struct dc_plane_cm {
+ struct kref refcount;
+ struct dc_transfer_func shaper_func;
+ union {
+ struct dc_3dlut lut3d_func;
+ struct dc_3dlut_dma lut3d_dma;
+ };
+ struct dc_transfer_func blend_func;
+ union dc_plane_cm_flags flags;
+};
+
/*
* This structure is filled in by dc_surface_get_status and contains
* the last requested address and the currently active address so the called
@@ -1490,14 +1525,18 @@ struct dc_plane_state {
struct fixed31_32 hdr_mult;
struct colorspace_transform gamut_remap_matrix;
- // TODO: No longer used, remove
- struct dc_hdr_static_metadata hdr_static_ctx;
-
enum dc_color_space color_space;
+ bool lut_bank_a;
+ struct dc_hdr_static_metadata hdr_static_ctx;
struct dc_3dlut lut3d_func;
struct dc_transfer_func in_shaper_func;
struct dc_transfer_func blend_tf;
+ enum dc_cm2_shaper_3dlut_setting mcm_shaper_3dlut_setting;
+ bool mcm_lut1d_enable;
+ struct dc_cm2_func_luts mcm_luts;
+ enum mpcc_movable_cm_location mcm_location;
+ struct dc_plane_cm cm;
struct dc_transfer_func *gamcor_tf;
enum surface_pixel_format format;
@@ -1534,11 +1573,6 @@ struct dc_plane_state {
bool is_statically_allocated;
enum chroma_cositing cositing;
- enum dc_cm2_shaper_3dlut_setting mcm_shaper_3dlut_setting;
- bool mcm_lut1d_enable;
- struct dc_cm2_func_luts mcm_luts;
- bool lut_bank_a;
- enum mpcc_movable_cm_location mcm_location;
struct dc_csc_transform cursor_csc_color_matrix;
bool adaptive_sharpness_en;
int adaptive_sharpness_policy;
@@ -1884,6 +1918,7 @@ struct dc_surface_update {
* change cm2_params.cm2_luts: Fast update
*/
const struct dc_cm2_parameters *cm2_params;
+ const struct dc_plane_cm *cm;
const struct dc_csc_transform *cursor_csc_color_matrix;
unsigned int sdr_white_level_nits;
struct dc_bias_and_scale bias_and_scale;
@@ -1928,6 +1963,10 @@ struct dc_3dlut *dc_create_3dlut_func(void);
void dc_3dlut_func_release(struct dc_3dlut *lut);
void dc_3dlut_func_retain(struct dc_3dlut *lut);
+struct dc_plane_cm *dc_plane_cm_create(void);
+void dc_plane_cm_release(struct dc_plane_cm *cm);
+void dc_plane_cm_retain(struct dc_plane_cm *cm);
+
void dc_post_update_surfaces_to_stream(
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 526f71616f94..6f96c5cf39fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -102,8 +102,7 @@ struct dc_vbios_funcs {
struct bp_external_encoder_control *cntl);
enum bp_result (*dac_load_detection)(
struct dc_bios *bios,
- enum engine_id engine_id,
- struct graphics_object_id ext_enc_id);
+ enum engine_id engine_id);
enum bp_result (*transmitter_control)(
struct dc_bios *bios,
struct bp_transmitter_control *cntl);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 9540f0ead279..7fa336bf1115 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1374,7 +1374,7 @@ union dpcd_replay_configuration {
unsigned char DESYNC_ERROR_STATUS : 1;
unsigned char SINK_DEVICE_REPLAY_STATUS : 3;
unsigned char SINK_FRAME_LOCKED : 2;
- unsigned char RESERVED : 1;
+ unsigned char FRAME_SKIPPING_ERROR_STATUS : 1;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index 14feb843e694..2ad6d9318566 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -31,6 +31,7 @@
union dc_plane_status_update_flags {
struct {
uint32_t address : 1;
+ uint32_t histogram : 1;
} bits;
uint32_t raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 37d1a79e8241..ba7bf23f2b2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -89,6 +89,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->callbacks = dcn32_spl_callbacks;
break;
case DCN_VERSION_4_01:
+ case DCN_VERSION_4_2:
spl_in->callbacks = dcn401_spl_callbacks;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_trace.h b/drivers/gpu/drm/amd/display/dc/dc_trace.h
index bbec308a3a5e..b7a011646d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_trace.h
@@ -23,8 +23,8 @@
#include "amdgpu_dm_trace.h"
-#define TRACE_DC_PIPE_STATE(pipe_ctx, index, max_pipes) \
- for (index = 0; index < max_pipes; ++index) { \
+#define TRACE_DC_PIPE_STATE(pipe_ctx, max_pipes) \
+ for (int index = 0; index < max_pipes; ++index) { \
struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[index]; \
if (pipe_ctx->plane_state) \
trace_amdgpu_dm_dc_pipe_state(pipe_ctx->pipe_idx, pipe_ctx->plane_state, \
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index d2e60480fb2b..e224077c8902 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1194,6 +1194,8 @@ struct replay_config {
union replay_optimization replay_optimization;
/* Replay sub feature Frame Skipping is supported */
bool frame_skip_supported;
+ /* Replay Received Frame Skipping Error HPD. */
+ bool received_frame_skipping_error_hpd;
};
/* Replay feature flags*/
@@ -1481,4 +1483,28 @@ struct dc_validation_dpia_set {
uint32_t required_bw;
};
+enum dc_cm_lut_swizzle {
+ CM_LUT_3D_SWIZZLE_LINEAR_RGB,
+ CM_LUT_3D_SWIZZLE_LINEAR_BGR,
+ CM_LUT_1D_PACKED_LINEAR
+};
+
+enum dc_cm_lut_pixel_format {
+ CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12MSB,
+ CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12MSB,
+ CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12LSB,
+ CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12LSB,
+ CM_LUT_PIXEL_FORMAT_RGBA16161616_FLOAT_FP1_5_10,
+ CM_LUT_PIXEL_FORMAT_BGRA16161616_FLOAT_FP1_5_10
+};
+
+enum dc_cm_lut_size {
+ CM_LUT_SIZE_NONE,
+ CM_LUT_SIZE_999,
+ CM_LUT_SIZE_171717,
+ CM_LUT_SIZE_333333,
+ CM_LUT_SIZE_454545,
+ CM_LUT_SIZE_656565,
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index ffcd2e139e76..088cf305a772 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -38,7 +38,11 @@
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
SR(DISPCLK_FREQ_CHANGE_CNTL),\
- SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV),\
+ SR(MILLISECOND_TIME_BASE_DIV),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
+ SR(DCCG_GATE_DISABLE_CNTL2)
#define DCCG_REG_LIST_DCN2() \
DCCG_COMMON_REG_LIST_DCN_BASE(),\
@@ -370,7 +374,8 @@
type OTG1_DROP_PIXEL;\
type OTG2_DROP_PIXEL;\
type OTG3_ADD_PIXEL;\
- type OTG3_DROP_PIXEL;
+ type OTG3_DROP_PIXEL;\
+ type RESYNC_FIFO_LEVEL_ADJUST_EN;
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
index 75c69348027e..c4d4eea140f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
@@ -96,6 +96,25 @@ static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppcl
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
+/*
+ * On DCN21 S0i3 resume, BIOS programs MICROSECOND_TIME_BASE_DIV to
+ * 0x00120464 as a marker that golden init has already been done.
+ * dcn21_s0i3_golden_init_wa() reads this marker later in bios_golden_init()
+ * to decide whether to skip golden init.
+ *
+ * dccg2_init() unconditionally overwrites MICROSECOND_TIME_BASE_DIV to
+ * 0x00120264, destroying the marker before it can be read.
+ *
+ * Guard the call: if the S0i3 marker is present, skip dccg2_init() so the
+ * WA can function correctly. bios_golden_init() will handle init in that case.
+ */
+static void dccg21_init(struct dccg *dccg)
+{
+ if (dccg2_is_s0i3_golden_init_wa_done(dccg))
+ return;
+
+ dccg2_init(dccg);
+}
static const struct dccg_funcs dccg21_funcs = {
.update_dpp_dto = dccg21_update_dpp_dto,
@@ -103,7 +122,7 @@ static const struct dccg_funcs dccg21_funcs = {
.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
.otg_add_pixel = dccg2_otg_add_pixel,
.otg_drop_pixel = dccg2_otg_drop_pixel,
- .dccg_init = dccg2_init,
+ .dccg_init = dccg21_init,
.refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */
.allow_clock_gating = dccg2_allow_clock_gating,
.enable_memory_low_power = dccg2_enable_memory_low_power,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
index 067e49cb238e..e2381ca0be0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
@@ -34,7 +34,13 @@
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
- SR(REFCLK_CNTL)
+ SR(REFCLK_CNTL),\
+ SR(DISPCLK_FREQ_CHANGE_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV),\
+ SR(MILLISECOND_TIME_BASE_DIV),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
+ SR(DCCG_GATE_DISABLE_CNTL2)
#define DCCG_MASK_SH_LIST_DCN301(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
index bf659920d4cc..b5e3849ef12a 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
@@ -64,9 +64,12 @@
SR(DSCCLK1_DTO_PARAM),\
SR(DSCCLK2_DTO_PARAM),\
SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
SR(DCCG_GATE_DISABLE_CNTL2),\
SR(DCCG_GATE_DISABLE_CNTL3),\
- SR(HDMISTREAMCLK0_DTO_PARAM)
+ SR(HDMISTREAMCLK0_DTO_PARAM),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV)
#define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
index a609635f35db..ecbdc05f7c45 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
@@ -70,11 +70,14 @@
SR(DSCCLK2_DTO_PARAM),\
SR(DSCCLK3_DTO_PARAM),\
SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
SR(DCCG_GATE_DISABLE_CNTL2),\
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL)
+ SR(DTBCLK_P_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV)
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index a37f94dec6f2..4b9a14c679d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -827,6 +827,16 @@ void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint3
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 1);
break;
+ case 4:
+ if (dccg_dcn->dccg_mask->SYMCLKE_FE_ROOT_GATE_DISABLE) {
+ REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
+ SYMCLKE_FE_EN, 1,
+ SYMCLKE_FE_SRC_SEL, link_enc_inst);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 1);
+ }
+ break;
+ default:
+ return;
}
}
@@ -855,6 +865,16 @@ void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint
SYMCLKD_FE_EN, 0,
SYMCLKD_FE_SRC_SEL, 0);
break;
+ case 4:
+ if (dccg_dcn->dccg_mask->SYMCLKE_FE_ROOT_GATE_DISABLE) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 0);
+ REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
+ SYMCLKE_FE_EN, 0,
+ SYMCLKE_FE_SRC_SEL, 0);
+ }
+ break;
+ default:
+ return;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
index d1593dc68e36..19dfc3fe5c3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
@@ -180,6 +180,61 @@ void dccg42_set_physymclk(
}
}
+void dccg42_set_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div tmds_div,
+ enum pixel_rate_div unused)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t cur_tmds_div = PIXEL_RATE_DIV_NA;
+ uint32_t dp_dto_int;
+ uint32_t reg_val;
+
+ // only 2 and 4 are valid on dcn42 (same constraint as dcn401, which this is derived from)
+ if (tmds_div != PIXEL_RATE_DIV_BY_2 && tmds_div != PIXEL_RATE_DIV_BY_4) {
+ return;
+ }
+
+ dccg401_get_pixel_rate_div(dccg, otg_inst, &cur_tmds_div, &dp_dto_int);
+ if (tmds_div == cur_tmds_div)
+ return;
+
+ // encode enum to register value
+ reg_val = tmds_div == PIXEL_RATE_DIV_BY_4 ? 1 : 0;
+
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(OTG_PIXEL_RATE_DIV,
+ OTG0_TMDS_PIXEL_RATE_DIV, reg_val);
+ break;
+ case 1:
+ REG_UPDATE(OTG_PIXEL_RATE_DIV,
+ OTG1_TMDS_PIXEL_RATE_DIV, reg_val);
+ break;
+ case 2:
+ REG_UPDATE(OTG_PIXEL_RATE_DIV,
+ OTG2_TMDS_PIXEL_RATE_DIV, reg_val);
+ break;
+ case 3:
+ REG_UPDATE(OTG_PIXEL_RATE_DIV,
+ OTG3_TMDS_PIXEL_RATE_DIV, reg_val);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg42_trigger_dio_fifo_resync(struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE(DISPCLK_FREQ_CHANGE_CNTL, RESYNC_FIFO_LEVEL_ADJUST_EN, 1);
+ REG_UPDATE(DISPCLK_FREQ_CHANGE_CNTL, RESYNC_FIFO_LEVEL_ADJUST_EN, 0);
+ REG_WAIT(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, 1, 50, 2000);
+}
+
static void dccg42_init(struct dccg *dccg)
{
int otg_inst;
@@ -240,9 +295,9 @@ static const struct dccg_funcs dccg42_funcs = {
.otg_drop_pixel = dccg42_otg_drop_pixel,
.disable_dsc = dccg35_disable_dscclk,
.enable_dsc = dccg35_enable_dscclk,
- .set_pixel_rate_div = dccg401_set_pixel_rate_div,
+ .set_pixel_rate_div = dccg42_set_pixel_rate_div,
.get_pixel_rate_div = dccg401_get_pixel_rate_div,
- .trigger_dio_fifo_resync = dccg35_trigger_dio_fifo_resync,
+ .trigger_dio_fifo_resync = dccg42_trigger_dio_fifo_resync,
.set_dp_dto = dccg401_set_dp_dto,
.enable_symclk_se = dccg35_enable_symclk_se,
.disable_symclk_se = dccg35_disable_symclk_se,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
index 96eae0003f43..d9831b0f8235 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
@@ -238,7 +238,8 @@
DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, mask_sh),\
DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, mask_sh),\
- DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh)
+ DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, RESYNC_FIFO_LEVEL_ADJUST_EN, mask_sh)
void dccg42_otg_add_pixel(struct dccg *dccg,
@@ -254,6 +255,14 @@ void dccg42_set_physymclk(
enum physymclk_clock_source clk_src,
bool force_enable);
+void dccg42_set_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div tmds_div,
+ enum pixel_rate_div unused);
+
+void dccg42_trigger_dio_fifo_resync(struct dccg *dccg);
+
struct dccg *dccg42_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 7116fdd4c7ec..d0ffa99f1fe0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -28,6 +28,14 @@
#include "dc_types.h"
#include "core_types.h"
+static bool dmub_hw_lock_has_inbox0_lock(const struct dc *dc)
+{
+ return dc->ctx && dc->ctx->dmub_srv &&
+ dc->hwss.dmub_hw_control_lock &&
+ dc->hwss.dmub_hw_control_lock_fast &&
+ dc->ctx->dmub_srv->dmub->meta_info.feature_bits.bits.inbox0_lock_support;
+}
+
void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
bool lock,
union dmub_hw_lock_flags *hw_locks,
@@ -105,5 +113,13 @@ bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link
if (dc->ctx->dce_version >= DCN_VERSION_4_01)
return false;
+ if (dmub_hw_lock_has_inbox0_lock(dc))
+ return false;
+
return dmub_hw_lock_mgr_does_link_require_lock(dc, link);
}
+
+bool should_use_dmub_inbox0_lock_for_link(const struct dc *dc, const struct dc_link *link)
+{
+ return dmub_hw_lock_has_inbox0_lock(dc) && dmub_hw_lock_mgr_does_link_require_lock(dc, link);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
index 4c80ca8484ad..3e8caa930390 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -46,7 +46,38 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
* Return: true if the inbox1 lock should be used, false otherwise
*/
bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link);
+
+/**
+ * dmub_hw_lock_mgr_does_link_require_lock() - Returns true if the link has a feature that needs the HW lock.
+ *
+ * @dc: Pointer to DC object
+ * @link: The link to check
+ *
+ * Return: true if the link has a feature that needs the HW lock, false otherwise
+ */
bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link);
+
+/**
+ * dmub_hw_lock_mgr_does_context_require_lock() - Returns true if the context has any stream that needs the HW lock.
+ *
+ * @dc: Pointer to DC object
+ * @context: The context to check
+ *
+ * Return: true if the context has any stream that needs the HW lock, false otherwise
+ */
bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context);
+/**
+ * should_use_dmub_inbox0_lock_for_link() - Checks if the inbox0 interlock with DMU should be used.
+ *
+ * Is not functionally equivalent to inbox1 as DMUB will not own programming of the relevant locking
+ * registers.
+ *
+ * @dc: pointer to DC object
+ * @link: optional pointer to the link object to check for enabled link features
+ *
+ * Return: true if the inbox0 lock should be used, false otherwise
+ */
+bool should_use_dmub_inbox0_lock_for_link(const struct dc *dc, const struct dc_link *link);
+
#endif /*_DMUB_HW_LOCK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
index 30cfc0848792..70d9f2cd0b60 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
@@ -90,6 +90,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
@@ -107,6 +108,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(d
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
@@ -124,6 +126,7 @@ DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
+DML21 += src/dml2_mcg/dml2_mcg_dcn42.o
DML21 += src/dml2_mcg/dml2_mcg_factory.o
DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
DML21 += src/dml2_pmo/dml2_pmo_factory.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
index 09303c282495..8e8935995fca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
@@ -4089,8 +4089,8 @@ static void CalculateSwathAndDETConfiguration(struct display_mode_lib_scratch_st
dml_uint_t MaximumSwathHeightC[__DML_NUM_PLANES__];
dml_uint_t RoundedUpMaxSwathSizeBytesY[__DML_NUM_PLANES__];
dml_uint_t RoundedUpMaxSwathSizeBytesC[__DML_NUM_PLANES__];
- dml_uint_t RoundedUpSwathSizeBytesY[__DML_NUM_PLANES__];
- dml_uint_t RoundedUpSwathSizeBytesC[__DML_NUM_PLANES__];
+ dml_uint_t RoundedUpSwathSizeBytesY[__DML_NUM_PLANES__] = { 0 };
+ dml_uint_t RoundedUpSwathSizeBytesC[__DML_NUM_PLANES__] = { 0 };
dml_uint_t SwathWidthSingleDPP[__DML_NUM_PLANES__];
dml_uint_t SwathWidthSingleDPPChroma[__DML_NUM_PLANES__];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
index 75a279997961..847fab508750 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
@@ -45,6 +45,9 @@ static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_versi
case DCN_VERSION_4_01:
project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
+ case DCN_VERSION_4_2:
+ project_id = dml2_project_dcn42;
+ break;
default:
project_id = dml2_project_invalid;
DC_ERR("unsupported dcn version for DML21!");
@@ -598,29 +601,31 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->composition.viewport.stationary = false;
- if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
+ if (plane_state->cm.flags.bits.lut3d_dma_enable) {
plane->tdlut.setup_for_tdlut = true;
- switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
+ switch (plane_state->cm.lut3d_dma.swizzle) {
+ case CM_LUT_3D_SWIZZLE_LINEAR_RGB:
+ case CM_LUT_3D_SWIZZLE_LINEAR_BGR:
plane->tdlut.tdlut_addressing_mode = dml2_tdlut_sw_linear;
break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
+ case CM_LUT_1D_PACKED_LINEAR:
+ default:
plane->tdlut.tdlut_addressing_mode = dml2_tdlut_simple_linear;
break;
}
- switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
+ switch (plane_state->cm.lut3d_dma.size) {
+ case CM_LUT_SIZE_333333:
+ plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ case CM_LUT_SIZE_171717:
default:
- //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
+ plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
}
}
+
plane->tdlut.setup_for_tdlut |= dml_ctx->config.force_tdlut_enable;
plane->dynamic_meta_data.enable = false;
@@ -824,6 +829,9 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency;
context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.z8_stutter_efficiency = in_ctx->v21.mode_programming.programming->informative.power_management.z8.stutter_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.z8_stutter_period = in_ctx->v21.mode_programming.programming->informative.power_management.z8.stutter_period;
+ context->bw_ctx.bw.dcn.clk.zstate_support = in_ctx->v21.mode_programming.programming->z8_stutter.supported_in_blank; /*ignore meets_eco since it is not used*/
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
@@ -931,3 +939,31 @@ void dml21_set_dc_p_state_type(
}
}
+void dml21_init_min_clocks_for_dc_state(struct dml2_context *in_ctx, struct dc_state *context)
+{
+ unsigned int lowest_dpm_state_index = 0;
+ struct dc_clocks *min_clocks = &context->bw_ctx.bw.dcn.clk;
+
+ min_clocks->dispclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->dppclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->dcfclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dcfclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->dramclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.uclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->fclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.fclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->idle_dramclk_khz = 0;
+ min_clocks->idle_fclk_khz = 0;
+ min_clocks->dcfclk_deep_sleep_khz = 0;
+ min_clocks->fclk_p_state_change_support = true;
+ min_clocks->p_state_change_support = true;
+ min_clocks->dtbclk_en = false;
+ min_clocks->ref_dtbclk_khz = 0;
+ min_clocks->socclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.socclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->subvp_prefetch_dramclk_khz = 0;
+ min_clocks->subvp_prefetch_fclk_khz = 0;
+ min_clocks->phyclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.phyclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->stutter_efficiency.base_efficiency = 1;
+ min_clocks->stutter_efficiency.low_power_efficiency = 1;
+ min_clocks->stutter_efficiency.z8_stutter_efficiency = 1;
+ min_clocks->stutter_efficiency.z8_stutter_period = 100000;
+ min_clocks->zstate_support = DCN_ZSTATE_SUPPORT_ALLOW;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
index 9880d3e0398e..f51d3d8a52c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
@@ -25,4 +25,5 @@ void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context);
+void dml21_init_min_clocks_for_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
index 732b994b8864..ab7ec24268be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
@@ -374,6 +374,7 @@ void dml21_handle_phantom_streams_planes(const struct dc *dc, struct dc_state *c
dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, dc->current_state);
}
+
static unsigned int dml21_build_fams2_stream_programming_v2(const struct dc *dc,
struct dc_state *context,
struct dml2_context *dml_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
index 798abb2b2e67..2623e917ec28 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
@@ -9,16 +9,21 @@
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
+#include "dc_fpu.h"
+
+#if !defined(DC_RUN_WITH_PREEMPTION_ENABLED)
+#define DC_RUN_WITH_PREEMPTION_ENABLED(code) code
+#endif // !DC_RUN_WITH_PREEMPTION_ENABLED
#define INVALID -1
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
- *dml_ctx = vzalloc(sizeof(struct dml2_context));
+ DC_RUN_WITH_PREEMPTION_ENABLED(*dml_ctx = vzalloc(sizeof(struct dml2_context)));
if (!(*dml_ctx))
return false;
- (*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance));
+ DC_RUN_WITH_PREEMPTION_ENABLED((*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance)));
if (!((*dml_ctx)->v21.dml_init.dml2_instance))
return false;
@@ -28,7 +33,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
- (*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming));
+ DC_RUN_WITH_PREEMPTION_ENABLED((*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming)));
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
@@ -70,8 +75,9 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, con
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
{
/* Allocate memory for initializing DML21 instance */
- if (!dml21_allocate_memory(dml_ctx))
+ if (!dml21_allocate_memory(dml_ctx)) {
return false;
+ }
dml21_init(in_dc, *dml_ctx, config);
@@ -215,6 +221,7 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
return true;
if (context->stream_count == 0) {
+ dml21_init_min_clocks_for_dc_state(dml_ctx, context);
dml21_build_fams2_programming(in_dc, context, dml_ctx);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h
new file mode 100644
index 000000000000..c75778ea7a2c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: MIT */
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML_DML_DCN42_SOC_BB__
+#define __DML_DML_DCN42_SOC_BB__
+
+#include "dml_top_soc_parameter_types.h"
+
+static const struct dml2_soc_qos_parameters dml_dcn42_variant_a_soc_qos_params = {
+ .derate_table = {
+ .system_active_urgent = {
+ .dram_derate_percent_pixel = 65,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 60,
+ .fclk_derate_percent = 80,
+ .dcfclk_derate_percent = 80,
+ },
+ .system_active_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ .dcn_mall_prefetch_urgent = {
+ .dram_derate_percent_pixel = 65,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 60,
+ .fclk_derate_percent = 80,
+ .dcfclk_derate_percent = 80,
+ },
+ .dcn_mall_prefetch_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ .system_idle_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ },
+ .writeback = {
+ .base_latency_us = 12,
+ .scaling_factor_us = 0,
+ .scaling_factor_mhz = 0,
+ },
+ .qos_params = {
+ .dcn32x = {
+ .loaded_round_trip_latency_fclk_cycles = 106,
+ .urgent_latency_us = {
+ .base_latency_us = 4,
+ .base_latency_pixel_vm_us = 4,
+ .base_latency_vm_us = 4,
+ .scaling_factor_fclk_us = 0,
+ .scaling_factor_mhz = 0,
+ },
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ },
+ },
+ .qos_type = dml2_qos_param_type_dcn3,
+};
+
+static const struct dml2_soc_bb dml2_socbb_dcn42 = {
+ .clk_table = {
+ .wck_ratio = {
+ .clk_values_khz = {2},
+ },
+ .uclk = {
+ .clk_values_khz = {400000},
+ .num_clk_values = 1,
+ },
+ .fclk = {
+ .clk_values_khz = {400000},
+ .num_clk_values = 1,
+ },
+ .dcfclk = {
+ .clk_values_khz = {200000},
+ .num_clk_values = 1,
+ },
+ .dispclk = {
+ .clk_values_khz = {1500000},
+ .num_clk_values = 1,
+ },
+ .dppclk = {
+ .clk_values_khz = {1500000},
+ .num_clk_values = 1,
+ },
+ .dtbclk = {
+ .clk_values_khz = {600000},
+ .num_clk_values = 1,
+ },
+ .phyclk = {
+ .clk_values_khz = {810000},
+ .num_clk_values = 1,
+ },
+ .socclk = {
+ .clk_values_khz = {600000},
+ .num_clk_values = 1,
+ },
+ .dscclk = {
+ .clk_values_khz = {500000},
+ .num_clk_values = 1,
+ },
+ .phyclk_d18 = {
+ .clk_values_khz = {667000},
+ .num_clk_values = 1,
+ },
+ .phyclk_d32 = {
+ .clk_values_khz = {625000},
+ .num_clk_values = 1,
+ },
+ .dram_config = {
+ .channel_width_bytes = 4,
+ .channel_count = 4,
+ .alt_clock_bw_conversion = true,
+ },
+ },
+
+ .qos_parameters = {
+ .derate_table = {
+ .system_active_urgent = {
+ .dram_derate_percent_pixel = 65,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 60,
+ .fclk_derate_percent = 80,
+ .dcfclk_derate_percent = 80,
+ },
+ .system_active_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ .dcn_mall_prefetch_urgent = {
+ .dram_derate_percent_pixel = 65,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 60,
+ .fclk_derate_percent = 80,
+ .dcfclk_derate_percent = 80,
+ },
+ .dcn_mall_prefetch_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ .system_idle_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ },
+ .writeback = {
+ .base_latency_us = 12,
+ .scaling_factor_us = 0,
+ .scaling_factor_mhz = 0,
+ },
+ .qos_params = {
+ .dcn32x = {
+ .loaded_round_trip_latency_fclk_cycles = 106,
+ .urgent_latency_us = {
+ .base_latency_us = 4,
+ .base_latency_pixel_vm_us = 4,
+ .base_latency_vm_us = 4,
+ .scaling_factor_fclk_us = 0,
+ .scaling_factor_mhz = 0,
+ },
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ },
+ },
+ .qos_type = dml2_qos_param_type_dcn3,
+ },
+
+ .power_management_parameters = {
+ .dram_clk_change_blackout_us = 29,
+ .fclk_change_blackout_us = 0,
+ .g7_ppt_blackout_us = 0,
+ .stutter_enter_plus_exit_latency_us = 11,
+ .stutter_exit_latency_us = 9,
+ .z8_stutter_enter_plus_exit_latency_us = 300,
+ .z8_stutter_exit_latency_us = 200,
+ },
+
+ .vmin_limit = {
+ .dispclk_khz = 632 * 1000,
+ },
+
+ .dprefclk_mhz = 600,
+ .xtalclk_mhz = 24,
+ .pcie_refclk_mhz = 100,
+ .dchub_refclk_mhz = 50,
+ .mall_allocated_for_dcn_mbytes = 64,
+ .max_outstanding_reqs = 256,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .hostvm_min_page_size_kbytes = 4,
+ .gpuvm_min_page_size_kbytes = 256,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_max_non_cached_page_table_levels = 2,
+ .phy_downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.38,
+ .dispclk_dppclk_vco_speed_mhz = 3000,
+ .do_urgent_latency_adjustment = 0,
+ .mem_word_bytes = 32,
+ .num_dcc_mcaches = 8,
+ .mcache_size_bytes = 2048,
+ .mcache_line_size_bytes = 32,
+ .max_fclk_for_uclk_dpm_khz = 2200 * 1000,
+};
+
+static const struct dml2_ip_capabilities dml2_dcn42_max_ip_caps = {
+ .pipe_count = 4,
+ .otg_count = 4,
+ .num_dsc = 4,
+ .max_num_dp2p0_streams = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_dp2p0_outputs = 4,
+ .rob_buffer_size_kbytes = 64,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .config_return_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .cursor_buffer_size = 24,
+ .max_flip_time_us = 110,
+ .max_flip_time_lines = 50,
+ .hostvm_mode = 0,
+ .subvp_drr_scheduling_margin_us = 100,
+ .subvp_prefetch_end_to_mall_start_us = 15,
+ .subvp_fw_processing_delay = 15,
+ .max_vactive_det_fill_delay_us = 400,
+
+ .fams2 = {
+ .max_allow_delay_us = 100 * 1000,
+ .scheduling_delay_us = 550,
+ .vertical_interrupt_ack_delay_us = 40,
+ .allow_programming_delay_us = 18,
+ .min_allow_width_us = 20,
+ .subvp_df_throttle_delay_us = 100,
+ .subvp_programming_delay_us = 200,
+ .subvp_prefetch_to_mall_delay_us = 18,
+ .drr_programming_delay_us = 35,
+
+ .lock_timeout_us = 5000,
+ .recovery_timeout_us = 5000,
+ .flip_programming_delay_us = 300,
+ },
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
index b44762e21550..4e9abe1a568d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
@@ -27,6 +27,19 @@ enum dml2_swizzle_mode {
dml2_gfx11_sw_256kb_d_x,
dml2_gfx11_sw_256kb_r_x,
+ dml2_sw_linear_256b, // GFX10 SW_LINEAR only accepts 256 byte aligned pitch
+ dml2_gfx10_sw_64kb_r_x,
+ dml2_gfx102_sw_64kb_s,
+ dml2_gfx102_sw_64kb_s_t,
+ dml2_gfx102_sw_64kb_s_x,
+ dml2_gfx102_sw_64kb_r_x,
+
+ dml2_linear_64elements, // GFX7 LINEAR_ALIGNED accepts pitch alignment of the maximum of 64 elements or 256 bytes
+ dml2_gfx7_1d_thin,
+ dml2_gfx7_2d_thin_gen_zero,
+ dml2_gfx7_2d_thin_gen_one,
+ dml2_gfx7_2d_thin_arlene,
+ dml2_gfx7_2d_thin_anubis
};
enum dml2_source_format_class {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
index 943fd3f040c3..98b26116cdc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
@@ -19,6 +19,8 @@ enum dml2_project_id {
dml2_project_dcn4x_stage1,
dml2_project_dcn4x_stage2,
dml2_project_dcn4x_stage2_auto_drr_svp,
+ dml2_project_dcn40,
+ dml2_project_dcn42,
};
enum dml2_pstate_change_support {
@@ -79,6 +81,7 @@ struct dml2_options {
struct dml2_pmo_options pmo_options;
};
+
struct dml2_initialize_instance_in_out {
struct dml2_instance *dml2_instance;
struct dml2_options options;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
index eba948e187c1..608b4a305c65 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -78,6 +78,86 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.subvp_swath_height_margin_lines = 16,
};
+struct dml2_core_ip_params core_dcn42_ip_caps_base = { /* Baseline IP params for DCN4.2, used when no explicit IP BB is supplied */
+	.vblank_nom_default_us = 668,
+	.remote_iommu_outstanding_translations = 256,
+	.rob_buffer_size_kbytes = 64,
+	.config_return_buffer_size_in_kbytes = 1792,
+	.config_return_buffer_segment_size_in_kbytes = 64,
+	.compressed_buffer_segment_size_in_kbytes = 64,
+	.dpte_buffer_size_in_pte_reqs_luma = 68,
+	.dpte_buffer_size_in_pte_reqs_chroma = 36,
+	.pixel_chunk_size_kbytes = 8,
+	.alpha_pixel_chunk_size_kbytes = 4,
+	.min_pixel_chunk_size_bytes = 1024,
+	.writeback_chunk_size_kbytes = 8,
+	.line_buffer_size_bits = 1171920,
+	.max_line_buffer_lines = 32,
+	.writeback_interface_buffer_size_kbytes = 90,
+
+	//Number of pipes after DCN Pipe harvesting
+	.max_num_dpp = 4,
+	.max_num_otg = 4,
+	.max_num_opp = 4,
+	.max_num_wb = 1,
+	.max_dchub_pscl_bw_pix_per_clk = 4,
+	.max_pscl_lb_bw_pix_per_clk = 2,
+	.max_lb_vscl_bw_pix_per_clk = 4,
+	.max_vscl_hscl_bw_pix_per_clk = 4,
+	.max_hscl_ratio = 6,
+	.max_vscl_ratio = 6,
+	.max_hscl_taps = 8,
+	.max_vscl_taps = 8,
+	.dispclk_ramp_margin_percent = 1,
+	.dppclk_delay_subtotal = 47,
+	.dppclk_delay_scl = 50,
+	.dppclk_delay_scl_lb_only = 16,
+	.dppclk_delay_cnvc_formatter = 28,
+	.dppclk_delay_cnvc_cursor = 6,
+	.cursor_buffer_size = 42,
+	.cursor_chunk_size = 2,
+	.dispclk_delay_subtotal = 125,
+	.max_inter_dcn_tile_repeaters = 8,
+	.writeback_max_hscl_ratio = 1, // writeback path: 1:1 scaling only (max == min, single tap)
+	.writeback_max_vscl_ratio = 1,
+	.writeback_min_hscl_ratio = 1,
+	.writeback_min_vscl_ratio = 1,
+	.writeback_max_hscl_taps = 1,
+	.writeback_max_vscl_taps = 1,
+	.writeback_line_buffer_buffer_size = 0,
+	.num_dsc = 4,
+	.maximum_dsc_bits_per_component = 12,
+	.maximum_pixels_per_line_per_dsc_unit = 5760,
+	.dsc422_native_support = true,
+	.dcc_supported = true,
+	.ptoi_supported = false,
+
+	.cursor_64bpp_support = true,
+	.dynamic_metadata_vm_enabled = false,
+
+	.max_num_hdmi_frl_outputs = 0,
+	.max_num_dp2p0_outputs = 2,
+	.max_num_dp2p0_streams = 4,
+	.imall_supported = 1,
+	.max_flip_time_us = 110,
+	.max_flip_time_lines = 50,
+	.words_per_channel = 16,
+
+	.subvp_fw_processing_delay_us = 15, // SubVP defaults; core_dcn42_initialize re-applies these over an explicit IP BB
+	.subvp_pstate_allow_width_us = 20,
+	.subvp_swath_height_margin_lines = 16,
+
+	.dcn_mrq_present = 1,
+	.zero_size_buffer_entries = 512,
+	.compbuf_reserved_space_zs = 64,
+	.dcc_meta_buffer_size_bytes = 6272,
+	.meta_chunk_size_kbytes = 2,
+	.min_meta_chunk_size_bytes = 256,
+
+	.dchub_arb_to_ret_delay = 102,
+	.hostvm_mode = 1, // NOTE(review): host VM mode enabled — confirm intended for DCN4.2
+};
+
static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *ip_caps, const struct dml2_core_ip_params *ip_params)
{
ip_caps->pipe_count = ip_params->max_num_dpp;
@@ -153,6 +233,37 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
return true;
}
+/* Initialize the DML2 core instance for DCN4.2: latch the minimum clock table
+ * and populate mode_lib IP/SOC state from either an explicit IP BB or the
+ * DCN4.2 baseline caps. Returns false only when no minimum clock table is given.
+ */
+bool core_dcn42_initialize(struct dml2_core_initialize_in_out *in_out)
+{
+	struct dml2_core_instance *core = in_out->instance;
+
+	if (!in_out->minimum_clock_table)
+		return false;
+
+	core->minimum_clock_table = in_out->minimum_clock_table;
+
+	if (in_out->explicit_ip_bb && in_out->explicit_ip_bb_size > 0) {
+		memcpy(&core->clean_me_up.mode_lib.ip, in_out->explicit_ip_bb, in_out->explicit_ip_bb_size);
+
+		// FIXME_STAGE2: DV still passes stage1 ip_param_st per variant; patch
+		// ip_caps from it, then re-apply the DCN4.2 SubVP defaults (fixed below:
+		// fw_processing_delay was mistakenly copied from pstate_allow_width).
+		patch_ip_caps_with_explicit_ip_params(in_out->ip_caps, in_out->explicit_ip_bb);
+		core->clean_me_up.mode_lib.ip.subvp_pstate_allow_width_us = core_dcn42_ip_caps_base.subvp_pstate_allow_width_us;
+		core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn42_ip_caps_base.subvp_fw_processing_delay_us;
+		core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn42_ip_caps_base.subvp_swath_height_margin_lines;
+	} else {
+		memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn42_ip_caps_base, sizeof(struct dml2_core_ip_params));
+		patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps);
+		core->clean_me_up.mode_lib.ip.imall_supported = false; // IMALL is force-disabled on this path
+	}
+
+	memcpy(&core->clean_me_up.mode_lib.soc, in_out->soc_bb, sizeof(struct dml2_soc_bb));
+	memcpy(&core->clean_me_up.mode_lib.ip_caps, in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
+
+	return true;
+}
+
static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters *phantom, const struct dml2_stream_parameters *main,
const struct dml2_implicit_svp_meta *meta)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
index a68bb001a346..5c26d819a673 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
@@ -5,6 +5,7 @@
#ifndef __DML2_CORE_DCN4_H__
#define __DML2_CORE_DCN4_H__
bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out);
+bool core_dcn42_initialize(struct dml2_core_initialize_in_out *in_out);
bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out);
bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out);
bool core_dcn4_populate_informative(struct dml2_core_populate_informative_in_out *in_out);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
index cc4f0663c6d6..6cad99c21139 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
@@ -21,6 +21,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
case dml2_project_dcn4x_stage1:
result = false;
break;
+ case dml2_project_dcn40:
case dml2_project_dcn4x_stage2:
case dml2_project_dcn4x_stage2_auto_drr_svp:
out->initialize = &core_dcn4_initialize;
@@ -30,6 +31,14 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
out->calculate_mcache_allocation = &core_dcn4_calculate_mcache_allocation;
result = true;
break;
+ case dml2_project_dcn42:
+ out->initialize = &core_dcn42_initialize;
+ out->mode_support = &core_dcn4_mode_support;
+ out->mode_programming = &core_dcn4_mode_programming;
+ out->populate_informative = &core_dcn4_populate_informative;
+ out->calculate_mcache_allocation = &core_dcn4_calculate_mcache_allocation;
+ result = true;
+ break;
case dml2_project_invalid:
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
index b57d0f6ea6a1..6930ba7ce5b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
@@ -428,6 +428,9 @@ bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_c
unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
{
+ if (dml2_core_utils_get_gfx_version(sw_mode) == 10 || dml2_core_utils_get_gfx_version(sw_mode) == 7) {
+ return dml2_core_utils_get_tile_block_size_bytes_backcompat(sw_mode, byte_per_pixel);
+ }
if (sw_mode == dml2_sw_linear)
return 256;
@@ -459,14 +462,56 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw
};
}
+unsigned int dml2_core_utils_get_tile_block_size_bytes_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel) /* tile block size in bytes for GFX10/GFX7-era swizzle modes */
+{
+	if (sw_mode == dml2_sw_linear_256b)
+		return 256;
+	else if (sw_mode == dml2_gfx10_sw_64kb_r_x)
+		return 65536; /* all GFX10/10.2 64KB modes: 64KB tile */
+	else if (sw_mode == dml2_gfx102_sw_64kb_s)
+		return 65536;
+	else if (sw_mode == dml2_gfx102_sw_64kb_s_t)
+		return 65536;
+	else if (sw_mode == dml2_gfx102_sw_64kb_s_x)
+		return 65536;
+	else if (sw_mode == dml2_gfx102_sw_64kb_r_x)
+		return 65536;
+	else if (sw_mode == dml2_linear_64elements)
+		return 256;
+	else if (sw_mode == dml2_gfx7_1d_thin)
+		return 256;
+	else if (sw_mode == dml2_gfx7_2d_thin_gen_zero)
+		return (128 * 64 * byte_per_pixel); /* GFX7 2D tiles are element-count sized: elems_x * elems_y * Bpp */
+	else if (sw_mode == dml2_gfx7_2d_thin_gen_one)
+		return (128 * 128 * byte_per_pixel);
+	else if (sw_mode == dml2_gfx7_2d_thin_arlene)
+		return (64 * 32 * byte_per_pixel);
+	else if (sw_mode == dml2_gfx7_2d_thin_anubis)
+		return (128 * 128 * byte_per_pixel);
+	else {
+		DML_ASSERT(0); /* unknown swizzle: flag in debug builds, fall back to linear 256B */
+		return 256;
+	};
+}
+
bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
{
- return (byte_per_pixel != 2);
+ if (dml2_core_utils_get_gfx_version(sw_mode) == 10 || dml2_core_utils_get_gfx_version(sw_mode) == 7) {
+ return dml2_core_utils_get_segment_horizontal_contiguous_backcompat(sw_mode, byte_per_pixel);
+ } else {
+ return (byte_per_pixel != 2);
+ }
+}
+
+bool dml2_core_utils_get_segment_horizontal_contiguous_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel) /* segments are horizontally contiguous except 4Bpp GFX10/10.2 64KB S/R modes */
+{
+	return !((byte_per_pixel == 4) &&
+		((sw_mode == dml2_gfx10_sw_64kb_r_x) || (sw_mode == dml2_gfx102_sw_64kb_s) || (sw_mode == dml2_gfx102_sw_64kb_s_t) || (sw_mode == dml2_gfx102_sw_64kb_s_x)));
}
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
{
- return sw_mode == dml2_sw_linear;
+ return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
};
@@ -499,6 +544,20 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_d_x ||
sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
+ else if (sw_mode == dml2_sw_linear_256b ||
+ sw_mode == dml2_gfx10_sw_64kb_r_x ||
+ sw_mode == dml2_gfx102_sw_64kb_s ||
+ sw_mode == dml2_gfx102_sw_64kb_s_t ||
+ sw_mode == dml2_gfx102_sw_64kb_s_x ||
+ sw_mode == dml2_gfx102_sw_64kb_r_x)
+ version = 10;
+ else if (sw_mode == dml2_linear_64elements ||
+ sw_mode == dml2_gfx7_1d_thin ||
+ sw_mode == dml2_gfx7_2d_thin_gen_zero ||
+ sw_mode == dml2_gfx7_2d_thin_gen_one ||
+ sw_mode == dml2_gfx7_2d_thin_arlene ||
+ sw_mode == dml2_gfx7_2d_thin_anubis)
+ version = 7;
else {
DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
DML_ASSERT(0);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
index 95f0d017add4..471e73ed671c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
@@ -22,6 +22,8 @@ void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_in
bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
+unsigned int dml2_core_utils_get_tile_block_size_bytes_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
+bool dml2_core_utils_get_segment_horizontal_contiguous_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode);
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 9d7741fd0adb..d17e59d684fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -802,3 +802,36 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
return true;
}
+bool dpmm_dcn42_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out) /* convert DML watermarks (us) into DCHUB register values (refclk cycles) */
+{
+	const struct dml2_display_cfg *display_cfg = &in_out->display_cfg->display_config;
+	const struct dml2_core_internal_display_mode_lib *mode_lib = &in_out->core->clean_me_up.mode_lib;
+	struct dml2_dchub_global_register_set *dchubbub_regs = &in_out->programming->global_regs;
+
+	double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz; /* HW override wins over SOC refclk */
+
+	/* set A */
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].refcyc_per_trip_to_mem = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].refcyc_per_meta_trip_to_mem = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_flip = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip * 1000);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_nom = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidth * 1000);
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_mall = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthMALL * 1000);
+
+	/* sets B, C and D mirror set A */
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B] = dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A];
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_C] = dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A];
+	dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_D] = dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A];
+
+	dchubbub_regs->num_watermark_sets = 4; /* A through D all programmed */
+
+	return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index e7b58f2efda4..5fbd07e238a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
@@ -10,5 +10,6 @@
bool dpmm_dcn3_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool dpmm_dcn4_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
+bool dpmm_dcn42_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index dfd01440737d..1f2d9e97f5fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -31,6 +31,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
out->map_watermarks = &dummy_map_watermarks;
result = true;
break;
+ case dml2_project_dcn40:
case dml2_project_dcn4x_stage2:
out->map_mode_to_soc_dpm = &dpmm_dcn3_map_mode_to_soc_dpm;
out->map_watermarks = &dummy_map_watermarks;
@@ -41,6 +42,11 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
out->map_watermarks = &dpmm_dcn4_map_watermarks;
result = true;
break;
+ case dml2_project_dcn42:
+ out->map_mode_to_soc_dpm = &dpmm_dcn4_map_mode_to_soc_dpm;
+ out->map_watermarks = &dpmm_dcn42_map_watermarks;
+ result = true;
+ break;
case dml2_project_invalid:
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index f54fde8fba90..02da6f45cbf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -10,4 +10,4 @@
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool mcg_dcn4_unit_test(void);
-#endif \ No newline at end of file
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c
new file mode 100644
index 000000000000..1f67cbc2c236
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2026 Advanced Micro Devices, Inc.
+
+#include "dml2_mcg_dcn42.h"
+#include "dml_top_soc_parameter_types.h"
+
+static unsigned long long uclk_to_dram_bw_kbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config, unsigned long wck_ratio) /* raw (pre-derate) DRAM bandwidth for a uclk level */
+{
+	unsigned long long bw_kbps = 0;
+
+	bw_kbps = (unsigned long long) uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * wck_ratio * 2; /* x2 presumably double data rate — TODO confirm */
+	return bw_kbps;
+}
+
+static bool build_min_clk_table_coarse_grained(const struct dml2_soc_bb *soc_bb, struct dml2_mcg_min_clock_table *min_table) /* fill dram_bw_table with one entry per fclk level */
+{
+	int i;
+
+	for (i = 0; i < soc_bb->clk_table.fclk.num_clk_values; i++) {
+		if (i < soc_bb->clk_table.uclk.num_clk_values) {
+			min_table->dram_bw_table.entries[i].pre_derate_dram_bw_kbps =
+				uclk_to_dram_bw_kbps(soc_bb->clk_table.uclk.clk_values_khz[i], &soc_bb->clk_table.dram_config, soc_bb->clk_table.wck_ratio.clk_values_khz[i]);
+			min_table->dram_bw_table.entries[i].min_uclk_khz = soc_bb->clk_table.uclk.clk_values_khz[i];
+		} else { /* fewer uclk than fclk levels: clamp to the highest uclk entry */
+			min_table->dram_bw_table.entries[i].pre_derate_dram_bw_kbps = min_table->dram_bw_table.entries[soc_bb->clk_table.uclk.num_clk_values - 1].pre_derate_dram_bw_kbps;
+			min_table->dram_bw_table.entries[i].min_uclk_khz = soc_bb->clk_table.uclk.clk_values_khz[soc_bb->clk_table.uclk.num_clk_values - 1];
+		}
+
+		min_table->dram_bw_table.entries[i].min_dcfclk_khz = soc_bb->clk_table.dcfclk.clk_values_khz[i]; /* NOTE(review): assumes dcfclk table has >= fclk.num_clk_values entries (no clamp like uclk) — confirm */
+		min_table->dram_bw_table.entries[i].min_fclk_khz = soc_bb->clk_table.fclk.clk_values_khz[i];
+	}
+	min_table->dram_bw_table.num_entries = soc_bb->clk_table.fclk.num_clk_values;
+
+	return true;
+}
+
+static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_mcg_min_clock_table *min_table) /* build DCN4.2 fixed/max/min clock tables from the SOC BB; false on bad args or oversized tables */
+{
+	bool result;
+
+	if (!soc_bb || !min_table)
+		return false;
+
+	if (soc_bb->clk_table.uclk.num_clk_values > DML_MCG_MAX_CLK_TABLE_SIZE ||
+	    soc_bb->clk_table.fclk.num_clk_values > DML_MCG_MAX_CLK_TABLE_SIZE)
+		return false; /* dram_bw_table.entries[] is filled once per fclk level, so fclk must fit too */
+
+	min_table->fixed_clocks_khz.amclk = 0;
+	min_table->fixed_clocks_khz.dprefclk = soc_bb->dprefclk_mhz * 1000;
+	min_table->fixed_clocks_khz.pcierefclk = soc_bb->pcie_refclk_mhz * 1000;
+	min_table->fixed_clocks_khz.dchubrefclk = soc_bb->dchub_refclk_mhz * 1000;
+	min_table->fixed_clocks_khz.xtalclk = soc_bb->xtalclk_mhz * 1000;
+
+	min_table->max_clocks_khz.dispclk = soc_bb->clk_table.dispclk.clk_values_khz[soc_bb->clk_table.dispclk.num_clk_values - 1];
+	min_table->max_clocks_khz.dppclk = soc_bb->clk_table.dppclk.clk_values_khz[soc_bb->clk_table.dppclk.num_clk_values - 1];
+	min_table->max_clocks_khz.dscclk = soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1];
+	min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
+	min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+
+	min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+	min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+	min_table->max_ss_clocks_khz.dtbclk = (unsigned int)((double)min_table->max_clocks_khz.dtbclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+
+	min_table->max_clocks_khz.dcfclk = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1];
+	min_table->max_clocks_khz.fclk = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1];
+
+	result = build_min_clk_table_coarse_grained(soc_bb, min_table);
+
+	return result;
+}
+
+bool mcg_dcn42_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out) /* DCN4.2 MCG entry point: delegates to the local builder */
+{
+	return build_min_clock_table(in_out->soc_bb, in_out->min_clk_table);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.h
new file mode 100644
index 000000000000..d4ea49e3e674
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+//
+// Copyright 2026 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_MCG_DCN42_H__
+#define __DML2_MCG_DCN42_H__
+
+#include "dml2_internal_shared_types.h"
+
+bool mcg_dcn42_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
index c60b8fe90819..3dcd2c250633 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -4,6 +4,7 @@
#include "dml2_mcg_factory.h"
#include "dml2_mcg_dcn4.h"
+#include "dml2_mcg_dcn42.h"
#include "dml2_external_lib_deps.h"
static bool dummy_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out)
@@ -25,11 +26,16 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *
out->build_min_clock_table = &dummy_build_min_clock_table;
result = true;
break;
+ case dml2_project_dcn40:
case dml2_project_dcn4x_stage2:
case dml2_project_dcn4x_stage2_auto_drr_svp:
out->build_min_clock_table = &mcg_dcn4_build_min_clock_table;
result = true;
break;
+ case dml2_project_dcn42:
+ out->build_min_clock_table = &mcg_dcn42_build_min_clock_table;
+ result = true;
+ break;
case dml2_project_invalid:
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index c26e100fcaf2..e8691983c0eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -23,6 +23,7 @@ static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
.allow_state_increase = true,
},
+
// Then VBlank
{
.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
@@ -53,6 +54,7 @@ static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
.allow_state_increase = true,
},
+
// Then VActive + VBlank
{
.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
@@ -113,6 +115,7 @@ static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
.allow_state_increase = true,
},
+
// VActive + 1 VBlank
{
.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na },
@@ -149,6 +152,7 @@ static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
.allow_state_increase = true,
},
+
// VActive + 1 VBlank
{
.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank },
@@ -1651,6 +1655,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
if (svp_count > 0 && (pmo->options->disable_svp || !all_timings_support_svp(pmo, display_cfg, svp_stream_mask)))
return false;
+
return is_config_schedulable(pmo, display_cfg, pstate_strategy);
}
@@ -1980,6 +1985,7 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
}
}
+
static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *display_config,
struct dml2_pmo_instance *pmo,
int plane_mask)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 55d2464365d0..4d687fa86caa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -3,8 +3,8 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_pmo_factory.h"
-#include "dml2_pmo_dcn4_fams2.h"
#include "dml2_pmo_dcn3.h"
+#include "dml2_pmo_dcn4_fams2.h"
#include "dml2_external_lib_deps.h"
static bool dummy_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
@@ -37,6 +37,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
out->optimize_dcc_mcache = pmo_dcn4_fams2_optimize_dcc_mcache;
result = true;
break;
+ case dml2_project_dcn40:
case dml2_project_dcn4x_stage2:
out->initialize = pmo_dcn3_initialize;
@@ -56,6 +57,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
result = true;
break;
+ case dml2_project_dcn42:
case dml2_project_dcn4x_stage2_auto_drr_svp:
out->initialize = pmo_dcn4_fams2_initialize;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
index b90f6263cd85..7218de1824cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -10,4 +10,4 @@
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out);
-#endif \ No newline at end of file
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
index 5a33e2f357f4..a6c5031f69c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
@@ -17,6 +17,8 @@ bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
case dml2_project_dcn4x_stage1:
case dml2_project_dcn4x_stage2:
case dml2_project_dcn4x_stage2_auto_drr_svp:
+ case dml2_project_dcn40:
+ case dml2_project_dcn42:
return dml2_top_soc15_initialize_instance(in_out);
case dml2_project_invalid:
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
index 5e14d85821e2..0e3177fe9d27 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
@@ -3,7 +3,6 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_top_legacy.h"
-#include "dml2_top_soc15.h"
#include "dml2_core_factory.h"
#include "dml2_pmo_factory.h"
#include "display_mode_core_structs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index a6bd75f30d20..d328d92240b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -410,6 +410,7 @@ struct dml2_core_mode_support_in_out {
} legacy;
};
+
struct dml2_core_mode_programming_in_out {
/*
* Inputs
@@ -501,6 +502,7 @@ struct dml2_core_instance {
bool (*populate_informative)(struct dml2_core_populate_informative_in_out *in_out);
bool (*calculate_mcache_allocation)(struct dml2_calculate_mcache_allocation_in_out *in_out);
+
struct {
struct dml2_core_internal_display_mode_lib mode_lib;
} clean_me_up;
@@ -753,6 +755,7 @@ struct dml2_pmo_instance {
bool (*test_for_stutter)(struct dml2_pmo_test_for_stutter_in_out *in_out);
bool (*optimize_for_stutter)(struct dml2_pmo_optimize_for_stutter_in_out *in_out);
+
struct dml2_pmo_init_data init_data;
struct dml2_pmo_scratch scratch;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
index 66040c877d68..d56e58ce26c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
@@ -357,7 +357,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st
*/
static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *context)
{
- struct pipe_ctx *subvp_pipes[2];
+ struct pipe_ctx *subvp_pipes[2] = { NULL, NULL };
struct dc_stream_state *phantom = NULL;
uint32_t microschedule_lines = 0;
uint32_t index = 0;
@@ -369,6 +369,9 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
uint32_t time_us = 0;
+ if (pipe == NULL || pipe->stream == NULL)
+ continue;
+
/* Loop to calculate the maximum microschedule time between the two SubVP pipes,
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
@@ -386,14 +389,19 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c
if (time_us > max_microschedule_us)
max_microschedule_us = time_us;
- subvp_pipes[index] = pipe;
- index++;
+ if (index < 2)
+ subvp_pipes[index++] = pipe;
// Maximum 2 SubVP pipes
if (index == 2)
break;
}
}
+
+ /* Minimal guard to avoid C6001 before subvp_pipes[0]/[1] dereference */
+ if (index < 2 || !subvp_pipes[0] || !subvp_pipes[1])
+ return false;
+
vactive1_us = ((subvp_pipes[0]->stream->timing.v_addressable * subvp_pipes[0]->stream->timing.h_total) /
(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
vactive2_us = ((subvp_pipes[1]->stream->timing.v_addressable * subvp_pipes[1]->stream->timing.h_total) /
@@ -459,6 +467,11 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
break;
}
+ if (pipe == NULL || pipe->stream == NULL) {
+ // Defensive: should never happen, try to catch in debug
+ ASSERT(0);
+ return false;
+ }
phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
main_timing = &pipe->stream->timing;
phantom_timing = &phantom_stream->timing;
@@ -549,6 +562,13 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
+
+ if (subvp_pipe == NULL) {
+ // Defensive: should never happen, catch in debug
+ ASSERT(0);
+ return false;
+ }
+
// Use ignore_msa_timing_param flag to identify as DRR
if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) {
// SUBVP + DRR case
@@ -753,6 +773,12 @@ static void enable_phantom_plane(struct dml2_context *ctx,
return;
}
+ /* Minimal NULL guard for C6011 */
+ if (!phantom_plane) {
+ ASSERT(0);
+ continue;
+ }
+
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
sizeof(phantom_plane->scaling_quality));
@@ -880,6 +906,11 @@ bool dml2_svp_add_phantom_pipe_to_dc_state(struct dml2_context *ctx, struct dc_s
if (ctx->config.svp_pstate.force_disable_subvp)
return false;
+ if (!state) {
+ ASSERT(0);
+ return false;
+ }
+
if (!all_pipes_have_stream_and_plane(ctx, state))
return false;
@@ -898,6 +929,10 @@ bool dml2_svp_add_phantom_pipe_to_dc_state(struct dml2_context *ctx, struct dc_s
}
if (enough_pipes_for_subvp(ctx, state) && assign_subvp_pipe(ctx, state, &dc_pipe_idx)) {
+ if (state->res_ctx.pipe_ctx[dc_pipe_idx].stream == NULL) {
+ ASSERT(0);
+ return false;
+ }
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(ctx, state->res_ctx.pipe_ctx[dc_pipe_idx].stream->stream_id);
svp_height = mode_support_info->SubViewportLinesNeededInMALL[dml_pipe_idx];
vstartup = dml_get_vstartup_calculated(&ctx->v20.dml_core_ctx, dml_pipe_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
index 307186eb6af0..408559d6fb2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
@@ -84,8 +84,9 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01))
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
return dml21_create(in_dc, dml2, config);
+ }
// Allocate Mode Lib Ctx
*dml2 = dml2_allocate_memory();
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index ce91e5d28956..0e70ffc784b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -172,10 +172,14 @@ bool dpp1_get_optimal_number_of_taps(
scl_data->taps.h_taps_c = in_taps->h_taps_c;
if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
+ if (IDENTITY_RATIO(scl_data->ratios.horz)) {
scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.h_taps_c = 1;
+ }
+ if (IDENTITY_RATIO(scl_data->ratios.vert)) {
scl_data->taps.v_taps = 1;
+ scl_data->taps.v_taps_c = 1;
+ }
if (IDENTITY_RATIO(scl_data->ratios.horz_c))
scl_data->taps.h_taps_c = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert_c))
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index c7923531da83..8a146968ee15 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -524,10 +524,14 @@ bool dpp3_get_optimal_number_of_taps(
scl_data->taps.v_taps_c = max_taps_c;
if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
+ if (IDENTITY_RATIO(scl_data->ratios.horz)) {
scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.h_taps_c = 1;
+ }
+ if (IDENTITY_RATIO(scl_data->ratios.vert)) {
scl_data->taps.v_taps = 1;
+ scl_data->taps.v_taps_c = 1;
+ }
if (IDENTITY_RATIO(scl_data->ratios.horz_c))
scl_data->taps.h_taps_c = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert_c))
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
index 82eca0e7b7d0..3284084ca7ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
@@ -132,6 +132,8 @@ static void dpp3_power_on_gamcor_lut(
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
if (power_on) {
REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
+ if (dpp_base->ctx->dc->caps.ips_v2_support)
+ REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, 1);
REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
} else {
dpp_base->ctx->dc->optimized_required = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index a62c4733ed3b..8b6155f9122f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -155,7 +155,12 @@ static void dpp401_power_on_dscl(
if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) {
if (power_on) {
REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0);
- REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
+ if (dpp->base.ctx->dc->caps.ips_v2_support) {
+ /*hw default changes to LS*/
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_DIS, 1);
+ REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 100);
+ } else
+ REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
} else {
if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) {
dpp->base.ctx->dc->optimized_required = true;
@@ -956,6 +961,15 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
*bs_coeffs_updated = false;
PERF_TRACE();
+ /*power on isharp_delta_mem first*/
+ if (dpp_base->ctx->dc->caps.ips_v2_support) {
+ /*HW default is LS, need to wake up*/
+ REG_UPDATE_2(ISHARP_DELTA_LUT_MEM_PWR_CTRL,
+ ISHARP_DELTA_LUT_MEM_PWR_FORCE, 0,
+ ISHARP_DELTA_LUT_MEM_PWR_DIS, 1);
+ REG_WAIT(ISHARP_DELTA_LUT_MEM_PWR_CTRL,
+ ISHARP_DELTA_LUT_MEM_PWR_STATE, 0, 1, 100);
+ }
/* ISHARP_MODE */
REG_SET_6(ISHARP_MODE, 0,
ISHARP_EN, scl_data->dscl_prog_data.isharp_en,
@@ -1033,6 +1047,13 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
}
}
+	/*restore isharp_delta_mem to its default power state after programming*/
+	if (dpp_base->ctx->dc->caps.ips_v2_support) {
+		/*HW default is LS; release the wake-up override that was set above*/
+ REG_UPDATE_SEQ_2(ISHARP_DELTA_LUT_MEM_PWR_CTRL,
+ ISHARP_DELTA_LUT_MEM_PWR_FORCE, 0,
+ ISHARP_DELTA_LUT_MEM_PWR_DIS, 0);
+ }
PERF_TRACE();
} // dpp401_dscl_program_isharp
/**
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
index 759b453385c4..92ed130aeaec 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -640,6 +640,11 @@ static void dcn31_hpo_dp_stream_enc_audio_setup(
REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL,
DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst);
+ if (enc3->hpo_se_mask->DP_STREAM_ENC_APG_CLOCK_EN) {
+ /*enable apg clk*/
+ REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL,
+ DP_STREAM_ENC_APG_CLOCK_EN, 1);
+ }
ASSERT(enc->apg);
enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 941dce439e97..b0a4b68cf359 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1151,8 +1151,6 @@ void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_siz
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
- unsigned int cur_compbuf_size_seg = 0;
-
if (safe_to_increase || compbuf_size_seg <= hubbub2->compbuf_size_segments) {
if (compbuf_size_seg > hubbub2->compbuf_size_segments) {
REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
@@ -1165,8 +1163,6 @@ void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_siz
+ hubbub2->det3_size + compbuf_size_seg <= hubbub2->crb_size_segs);
REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_seg);
hubbub2->compbuf_size_segments = compbuf_size_seg;
-
- ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &cur_compbuf_size_seg) && !cur_compbuf_size_seg);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index c205500290ec..4985e885952d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -41,12 +41,12 @@
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
- const struct dc_plane_address address)
+ const struct dc_plane_address *address)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH, HUBP_3DLUT_ADDRESS_HIGH, address.lut3d.addr.high_part);
- REG_WRITE(HUBP_3DLUT_ADDRESS_LOW, address.lut3d.addr.low_part);
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH, HUBP_3DLUT_ADDRESS_HIGH, address->lut3d.addr.high_part);
+ REG_WRITE(HUBP_3DLUT_ADDRESS_LOW, address->lut3d.addr.low_part);
}
void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group)
@@ -72,33 +72,46 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp)
return ret;
}
-void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode)
+static void hubp401_get_3dlut_fl_xbar_map(
+ const enum dc_cm_lut_pixel_format format,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_cb_b,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_cr_r)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_ADDRESSING_MODE, addr_mode);
-}
-
-void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width)
-{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
+ switch (format) {
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_FLOAT_FP1_5_10:
+ /* BGRA */
+ *bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ *bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ break;
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_FLOAT_FP1_5_10:
+ default:
+ /* RGBA */
+ *bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ *bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
}
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits)
+void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
+ const enum dc_cm_lut_pixel_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits);
-}
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r = 0;
-void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r)
-{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ hubp401_get_3dlut_fl_xbar_map(format,
+ &bit_slice_y_g,
+ &bit_slice_cb_b,
+ &bit_slice_cr_r);
REG_UPDATE_3(HUBP_3DLUT_CONTROL,
HUBP_3DLUT_CROSSBAR_SELECT_Y_G, bit_slice_y_g,
@@ -106,62 +119,122 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
HUBP_3DLUT_CROSSBAR_SELECT_CR_R, bit_slice_cr_r);
}
-void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale)
+static enum hubp_3dlut_fl_width hubp401_get_3dlut_fl_width(
+ const enum dc_cm_lut_size size,
+ const enum dc_cm_lut_swizzle swizzle)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_width width = 0;
+
+ switch (size) {
+ case CM_LUT_SIZE_333333:
+ ASSERT(swizzle != CM_LUT_1D_PACKED_LINEAR);
+ width = hubp_3dlut_fl_width_33;
+ break;
+ case CM_LUT_SIZE_171717:
+ if (swizzle != CM_LUT_1D_PACKED_LINEAR) {
+ width = hubp_3dlut_fl_width_17;
+ } else {
+ width = hubp_3dlut_fl_width_17_transformed;
+ }
+ break;
+ default:
+ width = 0;
+ break;
+ }
- REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE, HUBP0_3DLUT_FL_BIAS, bias, HUBP0_3DLUT_FL_SCALE, scale);
+ return width;
}
-void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode)
+static enum hubp_3dlut_fl_format hubp401_get_3dlut_fl_format(
+ const enum dc_cm_lut_pixel_format dc_format)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_format hubp_format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_MODE, mode);
+ switch (dc_format) {
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12MSB:
+ hubp_format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
+ break;
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12LSB:
+ hubp_format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
+ break;
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_FLOAT_FP1_5_10:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_FLOAT_FP1_5_10:
+ hubp_format = hubp_3dlut_fl_format_float_fp1_5_10;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ return hubp_format;
}
-void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format)
+static enum hubp_3dlut_fl_addressing_mode hubp401_get_3dlut_fl_addr_mode(
+ const enum dc_cm_lut_swizzle swizzle)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+
+ switch (swizzle) {
+ case CM_LUT_1D_PACKED_LINEAR:
+ addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
+ break;
+ case CM_LUT_3D_SWIZZLE_LINEAR_RGB:
+ case CM_LUT_3D_SWIZZLE_LINEAR_BGR:
+ default:
+ addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ }
- REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format);
+ return addr_mode;
}
-void hubp401_program_3dlut_fl_config(
- struct hubp *hubp,
- struct hubp_fl_3dlut_config *cfg)
+static enum hubp_3dlut_fl_mode hubp401_get_3dlut_fl_mode(
+ const enum dc_cm_lut_swizzle swizzle)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_mode mode;
- uint32_t mpc_width = {(cfg->width == 17) ? 0 : 1};
- uint32_t width = {cfg->width};
+ switch (swizzle) {
+ case CM_LUT_3D_SWIZZLE_LINEAR_RGB:
+ mode = hubp_3dlut_fl_mode_native_1;
+ break;
+ case CM_LUT_3D_SWIZZLE_LINEAR_BGR:
+ mode = hubp_3dlut_fl_mode_native_2;
+ break;
+ case CM_LUT_1D_PACKED_LINEAR:
+ mode = hubp_3dlut_fl_mode_transform;
+ break;
+ default:
+ mode = hubp_3dlut_fl_mode_disable;
+ break;
+ }
- if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR)
- width = (cfg->width == 17) ? 4916 : 35940;
+ return mode;
+}
+
+void hubp401_program_3dlut_fl_config(struct hubp *hubp,
+ const struct dc_3dlut_dma *config)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ enum hubp_3dlut_fl_width width = hubp401_get_3dlut_fl_width(config->size, config->swizzle);
+ enum hubp_3dlut_fl_format format = hubp401_get_3dlut_fl_format(config->format);
+ enum hubp_3dlut_fl_addressing_mode addr_mode = hubp401_get_3dlut_fl_addr_mode(config->swizzle);
+ enum hubp_3dlut_fl_mode mode = hubp401_get_3dlut_fl_mode(config->swizzle);
REG_UPDATE_2(_3DLUT_FL_CONFIG,
- HUBP0_3DLUT_FL_MODE, cfg->mode,
- HUBP0_3DLUT_FL_FORMAT, cfg->format);
+ HUBP0_3DLUT_FL_MODE, mode,
+ HUBP0_3DLUT_FL_FORMAT, format);
REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE,
- HUBP0_3DLUT_FL_BIAS, cfg->bias,
- HUBP0_3DLUT_FL_SCALE, cfg->scale);
-
- REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH,
- HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part);
- REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW,
- HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part);
-
- //cross bar
- REG_UPDATE_8(HUBP_3DLUT_CONTROL,
- HUBP_3DLUT_MPC_WIDTH, mpc_width,
- HUBP_3DLUT_WIDTH, width,
- HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r,
- HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g,
- HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b,
- HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode,
- HUBP_3DLUT_TMZ, cfg->protection_bits,
- HUBP_3DLUT_ENABLE, cfg->enabled ? 1 : 0);
+ HUBP0_3DLUT_FL_BIAS, config->bias,
+ HUBP0_3DLUT_FL_SCALE, config->scale);
+
+ REG_UPDATE_3(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_WIDTH, width,
+ HUBP_3DLUT_ADDRESSING_MODE, addr_mode,
+ HUBP_3DLUT_TMZ, config->addr.tmz_surface);
}
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
@@ -1058,19 +1131,13 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_update_mall_sel = hubp401_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_program_mcache_id_and_split_coordinate = hubp401_program_mcache_id_and_split_coordinate,
- .hubp_update_3dlut_fl_bias_scale = hubp401_update_3dlut_fl_bias_scale,
- .hubp_program_3dlut_fl_mode = hubp401_program_3dlut_fl_mode,
- .hubp_program_3dlut_fl_format = hubp401_program_3dlut_fl_format,
.hubp_program_3dlut_fl_addr = hubp401_program_3dlut_fl_addr,
+ .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
.hubp_program_3dlut_fl_dlg_param = hubp401_program_3dlut_fl_dlg_param,
.hubp_enable_3dlut_fl = hubp401_enable_3dlut_fl,
- .hubp_program_3dlut_fl_addressing_mode = hubp401_program_3dlut_fl_addressing_mode,
- .hubp_program_3dlut_fl_width = hubp401_program_3dlut_fl_width,
- .hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
- .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
.hubp_read_reg_state = hubp3_read_reg_state
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 4570b8016de5..043948f64b86 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -328,32 +328,17 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp);
void hubp401_set_unbounded_requesting(struct hubp *hubp, bool enable);
-void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale);
-
void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
-
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits);
-
-void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
-
-void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
+ const enum dc_cm_lut_pixel_format format);
void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable);
void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group);
-void hubp401_program_3dlut_fl_addr(struct hubp *hubp, const struct dc_plane_address address);
+void hubp401_program_3dlut_fl_addr(struct hubp *hubp, const struct dc_plane_address *address);
-void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format);
-
-void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
-
-void hubp401_program_3dlut_fl_config(
- struct hubp *hubp,
- struct hubp_fl_3dlut_config *cfg);
+void hubp401_program_3dlut_fl_config(struct hubp *hubp,
+ const struct dc_3dlut_dma *config);
void hubp401_clear_tiling(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
index 07c38dc03960..0e33c739f459 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
@@ -147,13 +147,16 @@ static void hubp42_program_pixel_format(
/* don't see the need of program the xbar in DCN 1.0 */
}
-void hubp42_program_deadline(
+static void hubp42_program_deadline(
struct hubp *hubp,
struct dml2_display_dlg_regs *dlg_attr,
struct dml2_display_ttu_regs *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ /* put DLG in mission mode */
+ REG_WRITE(HUBPREQ_DEBUG_DB, 0);
+
/* DLG - Per hubp */
REG_SET_2(BLANK_OFFSET_0, 0,
REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
@@ -274,19 +277,84 @@ static void hubp42_program_surface_config(
hubp42_program_pixel_format(hubp, format);
}
+static void hubp42_get_3dlut_fl_xbar_map(
+ const enum dc_cm_lut_pixel_format format,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_cb_b,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_cr_r)
+{
+ switch (format) {
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_FLOAT_FP1_5_10:
+ /* BGRA */
+ *bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ *bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ break;
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_FLOAT_FP1_5_10:
+ default:
+ /* RGBA */
+ *bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ *bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
+}
+
void hubp42_program_3dlut_fl_crossbar(struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_r,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_b)
+ const enum dc_cm_lut_pixel_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_r = 0;
+
+ hubp42_get_3dlut_fl_xbar_map(format,
+ &bit_slice_g,
+ &bit_slice_b,
+ &bit_slice_r);
+
REG_UPDATE_3(HUBP_3DLUT_CONTROL,
HUBP_3DLUT_CROSSBAR_SEL_R, bit_slice_r,
HUBP_3DLUT_CROSSBAR_SEL_G, bit_slice_g,
HUBP_3DLUT_CROSSBAR_SEL_B, bit_slice_b);
}
+static uint32_t hubp42_get_3dlut_fl_mpc_width(
+ const enum dc_cm_lut_size size)
+{
+ uint32_t width = 0;
+
+ switch (size) {
+ case CM_LUT_SIZE_333333:
+ width = 1;
+ break;
+ case CM_LUT_SIZE_171717:
+ default:
+ width = 0;
+ break;
+ }
+
+ return width;
+}
+
+void hubp42_program_3dlut_fl_config(struct hubp *hubp,
+ const struct dc_3dlut_dma *config)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ uint32_t mpc_width = hubp42_get_3dlut_fl_mpc_width(config->size);
+
+ REG_UPDATE(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_MPC_WIDTH, mpc_width);
+
+ hubp401_program_3dlut_fl_config(hubp, config);
+}
+
static bool hubp42_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
@@ -548,6 +616,7 @@ struct hubp_funcs dcn42_hubp_funcs = {
.hubp_setup_interdependent2 = hubp401_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
.hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
@@ -567,17 +636,13 @@ struct hubp_funcs dcn42_hubp_funcs = {
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
- .hubp_update_3dlut_fl_bias_scale = hubp401_update_3dlut_fl_bias_scale,
- .hubp_program_3dlut_fl_mode = hubp401_program_3dlut_fl_mode,
- .hubp_program_3dlut_fl_format = hubp401_program_3dlut_fl_format,
.hubp_program_3dlut_fl_addr = hubp401_program_3dlut_fl_addr,
+ .hubp_program_3dlut_fl_config = hubp42_program_3dlut_fl_config,
.hubp_program_3dlut_fl_dlg_param = hubp401_program_3dlut_fl_dlg_param,
.hubp_enable_3dlut_fl = hubp401_enable_3dlut_fl,
- .hubp_program_3dlut_fl_addressing_mode = hubp401_program_3dlut_fl_addressing_mode,
- .hubp_program_3dlut_fl_width = hubp401_program_3dlut_fl_width,
- .hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp42_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
+ .hubp_clear_tiling = hubp3_clear_tiling,
.hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
.hubp_read_reg_state = hubp3_read_reg_state
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
index 976614f38981..486c8907413a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
@@ -56,11 +56,11 @@ bool hubp42_construct(
const struct dcn_hubp2_shift *hubp_shift,
const struct dcn_hubp2_mask *hubp_mask);
-void hubp42_program_3dlut_fl_crossbar(
- struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_r,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_b);
+void hubp42_program_3dlut_fl_crossbar(struct hubp *hubp,
+ const enum dc_cm_lut_pixel_format format);
+
+void hubp42_program_3dlut_fl_config(struct hubp *hubp,
+ const struct dc_3dlut_dma *config);
void hubp42_read_state(struct hubp *hubp);
@@ -70,10 +70,4 @@ void hubp42_setup(
union dml2_global_sync_programming *pipe_global_sync,
struct dc_crtc_timing *timing);
-void hubp42_program_deadline(
- struct hubp *hubp,
- struct dml2_display_dlg_regs *dlg_attr,
- struct dml2_display_ttu_regs *ttu_attr);
-
-
#endif /* __DC_HUBP_DCN42_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 8aafd460c36f..8a17cc036399 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -86,9 +86,9 @@
hws->ctx
#define DC_LOGGER \
- ctx->logger
-#define DC_LOGGER_INIT() \
- struct dc_context *ctx = dc->ctx
+ dc_ctx->logger
+#define DC_LOGGER_INIT(ctx) \
+ struct dc_context *dc_ctx = ctx
#define REG(reg)\
hws->regs->reg
@@ -661,45 +661,16 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
}
static void
-dce110_external_encoder_control(enum bp_external_encoder_control_action action,
- struct dc_link *link,
- struct dc_crtc_timing *timing)
+dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable)
{
- struct dc *dc = link->ctx->dc;
+ struct dc_link *link = pipe_ctx->stream->link;
struct dc_bios *bios = link->ctx->dc_bios;
- const struct dc_link_settings *link_settings = &link->cur_link_settings;
- enum bp_result bp_result = BP_RESULT_OK;
- struct bp_external_encoder_control ext_cntl = {
- .action = action,
- .connector_obj_id = link->link_enc->connector,
- .encoder_id = link->ext_enc_id,
- .lanes_number = link_settings->lane_count,
- .link_rate = link_settings->link_rate,
-
- /* Use signal type of the real link encoder, ie. DP */
- .signal = link->connector_signal,
-
- /* We don't know the timing yet when executing the SETUP action,
- * so use a reasonably high default value. It seems that ENABLE
- * can change the actual pixel clock but doesn't work with higher
- * pixel clocks than what SETUP was called with.
- */
- .pixel_clock = timing ? timing->pix_clk_100hz / 10 : 300000,
- .color_depth = timing ? timing->display_color_depth : COLOR_DEPTH_888,
- };
- DC_LOGGER_INIT();
-
- bp_result = bios->funcs->external_encoder_control(bios, &ext_cntl);
-
- if (bp_result != BP_RESULT_OK)
- DC_LOG_ERROR("Failed to execute external encoder action: 0x%x\n", action);
-}
+ struct bp_encoder_control encoder_control = {0};
-static void
-dce110_prepare_ddc(struct dc_link *link)
-{
- if (link->ext_enc_id.id)
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_DDC_SETUP, link, NULL);
+ encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE;
+ encoder_control.engine_id = link->link_enc->analog_engine;
+ encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ bios->funcs->encoder_control(bios, &encoder_control);
}
static bool
@@ -709,8 +680,7 @@ dce110_dac_load_detect(struct dc_link *link)
struct link_encoder *link_enc = link->link_enc;
enum bp_result bp_result;
- bp_result = bios->funcs->dac_load_detection(
- bios, link_enc->analog_engine, link->ext_enc_id);
+ bp_result = bios->funcs->dac_load_detection(bios, link_enc->analog_engine);
return bp_result == BP_RESULT_OK;
}
@@ -726,6 +696,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ link_hwss->setup_stream_attribute(pipe_ctx);
link_hwss->setup_stream_encoder(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -744,8 +715,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
tg->funcs->set_early_control(tg, early_control);
- if (link->ext_enc_id.id)
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_ENABLE, link, timing);
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, true);
}
static enum bp_result link_transmitter_control(
@@ -767,13 +738,14 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up)
{
- struct dc_context *ctx = link->ctx;
struct graphics_object_id connector = link->link_enc->connector;
bool edp_hpd_high = false;
uint32_t time_elapsed = 0;
uint32_t timeout = power_up ?
PANEL_POWER_UP_TIMEOUT : PANEL_POWER_DOWN_TIMEOUT;
+ DC_LOGGER_INIT(link->ctx);
+
if (dal_graphics_object_id_get_connector_id(connector)
!= CONNECTOR_ID_EDP) {
BREAK_TO_DEBUGGER();
@@ -825,6 +797,7 @@ void dce110_edp_power_control(
enum bp_result bp_result;
uint8_t pwrseq_instance;
+ DC_LOGGER_INIT(ctx);
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@@ -993,6 +966,8 @@ void dce110_edp_backlight_control(
unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0);
unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_POST_T7_DELAY : 0);
+ DC_LOGGER_INIT(ctx);
+
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
BREAK_TO_DEBUGGER();
@@ -1240,8 +1215,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
- if (link->ext_enc_id.id)
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_DISABLE, link, NULL);
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, false);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1623,6 +1598,22 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
+static void
+dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_crtc_source_select crtc_source_select = {0};
+ enum engine_id engine_id = link->link_enc->preferred_engine;
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ engine_id = link->link_enc->analog_engine;
+ crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
+ crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth;
+ crtc_source_select.engine_id = engine_id;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+ bios->funcs->select_crtc_source(bios, &crtc_source_select);
+}
enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
@@ -1643,6 +1634,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) {
+ dce110_select_crtc_source(pipe_ctx);
+ }
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output = {0};
@@ -1722,7 +1717,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal) &&
+ !dc_is_rgb_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1944,6 +1940,35 @@ static void clean_up_dsc_blocks(struct dc *dc)
}
}
+static void dc_hwss_enable_otg_pwa(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *tg = NULL;
+
+ if (dc->debug.enable_otg_frame_sync_pwa == 0)
+ return;
+
+ if (pipe_ctx == NULL || pipe_ctx->stream_res.tg == NULL)
+ return;
+ tg = pipe_ctx->stream_res.tg;
+
+ /*only enable this if one active*/
+ if (tg->funcs->enable_otg_pwa) {
+ struct otc_pwa_frame_sync pwa_param = {0};
+
+ DC_LOGGER_INIT(dc->ctx);
+ /* mode 1 to choose generate pwa sync signal on line 0 counting
+ * from vstartup at very beginning of the frame
+ */
+ pwa_param.pwa_frame_sync_line_offset = 0;
+ pwa_param.pwa_sync_mode = DC_OTG_PWA_FRAME_SYNC_MODE_VSTARTUP;
+ /*frame sync line for generating high frame sync*/
+ tg->funcs->enable_otg_pwa(tg, &pwa_param);
+ DC_LOG_DC("Enable OTG PWA frame sync on TG %d\n", tg->inst);
+ }
+}
+
/*
* When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
* 1. Power down all DC HW blocks
@@ -1969,8 +1994,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool keep_edp_vdd_on = false;
bool should_clean_dsc_block = true;
struct dc_bios *dcb = dc->ctx->dc_bios;
- DC_LOGGER_INIT();
-
+ DC_LOGGER_INIT(dc->ctx);
get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num);
dc_get_edp_links(dc, edp_links, &edp_num);
@@ -2021,6 +2045,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
// If VBios supports it, we check it from reigster or other flags.
pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle = 1;
}
+ dc_hwss_enable_otg_pwa(dc, pipe_ctx);
}
break;
}
@@ -2590,6 +2615,18 @@ enum dc_status dce110_apply_ctx_to_hw(
#endif
}
+
+ if (dc->debug.enable_otg_frame_sync_pwa && context->stream_count == 1) {
+ /* only enable this on one OTG*/
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx && pipe_ctx->stream != NULL) {
+ dc_hwss_enable_otg_pwa(dc, pipe_ctx);
+ break;
+ }
+ }
+ }
if (dc->fbc_compressor)
enable_fbc(dc, dc->current_state);
@@ -2736,7 +2773,6 @@ static bool wait_for_reset_trigger_to_occur(
struct dc_context *dc_ctx,
struct timing_generator *tg)
{
- struct dc_context *ctx = dc_ctx;
bool rc = false;
/* To avoid endless loop we wait at most
@@ -2778,10 +2814,9 @@ static void dce110_enable_timing_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
struct dcp_gsl_params gsl_params = { 0 };
int i;
- DC_LOGGER_INIT();
+ DC_LOGGER_INIT(dc->ctx);
DC_SYNC_INFO("GSL: Setting-up...\n");
@@ -2824,10 +2859,9 @@ static void dce110_enable_per_frame_crtc_position_reset(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
struct dcp_gsl_params gsl_params = { 0 };
int i;
- DC_LOGGER_INIT();
+ DC_LOGGER_INIT(dc->ctx);
gsl_params.gsl_group = 0;
gsl_params.gsl_master = 0;
@@ -3320,15 +3354,6 @@ void dce110_enable_tmds_link_output(struct dc_link *link,
link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
}
-static void dce110_enable_analog_link_output(
- struct dc_link *link,
- uint32_t pix_clk_100hz)
-{
- link->link_enc->funcs->enable_analog_output(
- link->link_enc,
- pix_clk_100hz);
-}
-
void dce110_enable_dp_link_output(
struct dc_link *link,
const struct link_resource *link_res,
@@ -3376,11 +3401,6 @@ void dce110_enable_dp_link_output(
}
}
- if (link->ext_enc_id.id) {
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_INIT, link, NULL);
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_SETUP, link, NULL);
- }
-
if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
@@ -3471,10 +3491,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
- .enable_analog_link_output = dce110_enable_analog_link_output,
.disable_link_output = dce110_disable_link_output,
.dac_load_detect = dce110_dac_load_detect,
- .prepare_ddc = dce110_prepare_ddc,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index a2d28be480e8..17ff66d9a617 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -60,9 +60,9 @@
#include "dc_state_priv.h"
#define DC_LOGGER \
- dc_logger
-#define DC_LOGGER_INIT(logger) \
- struct dal_logger *dc_logger = logger
+ dc_ctx->logger
+#define DC_LOGGER_INIT(ctx) \
+ struct dc_context *dc_ctx = ctx
#define CTX \
hws->ctx
@@ -1009,7 +1009,7 @@ static void power_on_plane_resources(
struct dce_hwseq *hws,
int plane_id)
{
- DC_LOGGER_INIT(hws->ctx->logger);
+ DC_LOGGER_INIT(hws->ctx);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, plane_id, true);
@@ -1286,7 +1286,7 @@ static void dcn10_reset_back_end_for_pipe(
{
int i;
struct dc_link *link;
- DC_LOGGER_INIT(dc->ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
@@ -1422,12 +1422,10 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
return;
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
- int i = 0;
-
if (should_log_hw_state)
dcn10_log_hw_state(dc, NULL);
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
+ TRACE_DC_PIPE_STATE(pipe_ctx, MAX_PIPES);
BREAK_TO_DEBUGGER();
if (dcn10_hw_wa_force_recovery(dc)) {
/*check again*/
@@ -1490,7 +1488,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
struct hubp *hubp)
{
struct dce_hwseq *hws = dc->hwseq;
- DC_LOGGER_INIT(dc->ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -1554,7 +1552,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
- DC_LOGGER_INIT(dc->ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
@@ -2268,8 +2266,6 @@ static bool wait_for_reset_trigger_to_occur(
{
bool rc = false;
- DC_LOGGER_INIT(dc_ctx->logger);
-
/* To avoid endless loop we wait at most
* frames_to_wait_on_triggered_reset frames for the reset to occur. */
const uint32_t frames_to_wait_on_triggered_reset = 10;
@@ -2384,7 +2380,6 @@ static uint8_t get_clock_divider(struct pipe_ctx *pipe,
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
int i, master = -1, embedded = -1;
struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
@@ -2397,7 +2392,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
uint32_t dp_ref_clk_100hz =
dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
- DC_LOGGER_INIT(dc_ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
hw_crtc_timing = kzalloc_objs(*hw_crtc_timing, MAX_PIPES);
if (!hw_crtc_timing)
@@ -2477,12 +2472,11 @@ void dcn10_enable_vblanks_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
int i, width = 0, height = 0, master;
- DC_LOGGER_INIT(dc_ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
for (i = 1; i < group_size; i++) {
opp = grouped_pipes[i]->stream_res.opp;
@@ -2543,12 +2537,11 @@ void dcn10_enable_timing_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
int i, width = 0, height = 0;
- DC_LOGGER_INIT(dc_ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
DC_SYNC_INFO("Setting up OTG reset trigger\n");
@@ -2624,10 +2617,9 @@ void dcn10_enable_per_frame_crtc_position_reset(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
int i;
- DC_LOGGER_INIT(dc_ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
DC_SYNC_INFO("Setting up\n");
for (i = 0; i < group_size; i++)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index db2f7cbb12ff..94f63fd54e3e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -513,7 +513,6 @@ static void dcn31_reset_back_end_for_pipe(
{
struct dc_link *link;
- DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index b5a4cefbd35f..b5f60f59382e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -60,15 +60,15 @@
#include "dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"
-#define DC_LOGGER_INIT(logger) \
- struct dal_logger *dc_logger = logger
+#define DC_LOGGER \
+ dc_ctx->logger
+#define DC_LOGGER_INIT(ctx) \
+ struct dc_context *dc_ctx = ctx
#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg
-#define DC_LOGGER \
- dc_logger
#undef FN
@@ -331,7 +331,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
- DC_LOGGER_INIT(stream->ctx->logger);
+ DC_LOGGER_INIT(stream->ctx);
ASSERT(dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
@@ -897,7 +897,7 @@ void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx
bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
- DC_LOGGER_INIT(dc->ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 69cc70106bf0..357899116ecd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -95,10 +95,6 @@ void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- //For now assert if location is not pre-blend
- if (pipe_ctx->plane_state)
- ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);
-
// program MPCC_MCM_FIRST_GAMUT_REMAP
memset(&mpc_adjust, 0, sizeof(mpc_adjust));
mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -304,6 +300,7 @@ void dcn401_init_hw(struct dc *dc)
}
}
}
+
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -372,293 +369,179 @@ void dcn401_init_hw(struct dc *dc)
}
}
-static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
- enum MCM_LUT_XABLE *shaper_xable,
- enum MCM_LUT_XABLE *lut3d_xable,
- enum MCM_LUT_XABLE *lut1d_xable)
+void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
- bool lut1d_enable = false;
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
- shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
- lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
-
- *lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
- switch (shaper_3dlut_setting) {
- case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
- *lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
- break;
- case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
- *lut3d_xable = MCM_LUT_DISABLE;
- *shaper_xable = MCM_LUT_ENABLE;
- break;
- case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
- *lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
- break;
+ if (hubp->funcs->hubp_enable_3dlut_fl) {
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
}
}
-void dcn401_populate_mcm_luts(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_cm2_func_luts mcm_luts,
- bool lut_bank_a)
+bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
+ struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ const struct dc_plane_cm *cm = &plane_state->cm;
int mpcc_id = hubp->inst;
struct mpc *mpc = dc->res_pool->mpc;
union mcm_lut_params m_lut_params;
- enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format = 0;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width = 0;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
- enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
- enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
- enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
+ struct dc_3dlut_dma lut3d_dma;
+ bool lut_enable;
+ bool lut_bank_a;
bool rval;
+ bool result = true;
- dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
+ /* decide LUT bank based on current in use */
+ mpc->funcs->get_lut_mode(mpc, MCM_LUT_1DLUT, mpcc_id, &lut_enable, &lut_bank_a);
+ if (!lut_enable) {
+ mpc->funcs->get_lut_mode(mpc, MCM_LUT_SHAPER, mpcc_id, &lut_enable, &lut_bank_a);
+ }
+ if (!lut_enable) {
+ mpc->funcs->get_lut_mode(mpc, MCM_LUT_3DLUT, mpcc_id, &lut_enable, &lut_bank_a);
+ }
+
+ /* switch to the next bank */
+ if (lut_enable) {
+ lut_bank_a = !lut_bank_a;
+ }
+
+ /* MCM location fixed to pre-blend */
+ mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
/* 1D LUT */
- if (mcm_luts.lut1d_func) {
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
- m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
- else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
- mcm_luts.lut1d_func,
- &dpp_base->regamma_params, false);
+ lut_enable = cm->flags.bits.blend_enable;
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
+ if (lut_enable) {
+ if (cm->blend_func.type == TF_TYPE_HWPWL)
+ m_lut_params.pwl = &cm->blend_func.pwl;
+ else if (cm->blend_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &cm->blend_func,
+ &dpp_base->regamma_params,
+ false);
m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
- if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
+
+ if (!m_lut_params.pwl) {
+ lut_enable = false;
}
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
+ } else {
+ lut_enable = false;
}
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut_enable, lut_bank_a, CM_LUT_SIZE_NONE, mpcc_id);
+ if (lut_enable && mpc->funcs->populate_lut)
+ mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, &m_lut_params, lut_bank_a, mpcc_id);
+
/* Shaper */
- if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
+ lut_enable = cm->flags.bits.shaper_enable;
+ if (lut_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
- m_lut_params.pwl = &mcm_luts.shaper->pwl;
- else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ if (cm->shaper_func.type == TF_TYPE_HWPWL)
+ m_lut_params.pwl = &cm->shaper_func.pwl;
+ else if (cm->shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
ASSERT(false);
- rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
- mcm_luts.shaper,
- &dpp_base->regamma_params, true);
- m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
+ rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &cm->shaper_func,
+ &dpp_base->shaper_params,
+ true);
+ m_lut_params.pwl = rval ? &dpp_base->shaper_params : NULL;
}
- if (m_lut_params.pwl) {
- if (mpc->funcs->mcm.populate_lut)
- mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
+ if (!m_lut_params.pwl) {
+ lut_enable = false;
}
+ } else {
+ lut_enable = false;
}
- /* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (hubp->funcs->hubp_enable_3dlut_fl)
- hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, lut_enable, lut_bank_a, CM_LUT_SIZE_NONE, mpcc_id);
+ if (lut_enable && mpc->funcs->populate_lut)
+ mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, &m_lut_params, lut_bank_a, mpcc_id);
- if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
- m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
- mpcc_id);
- }
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- default:
- //TODO: handle default case
- break;
- }
-
- //check for support
- if (mpc->funcs->mcm.is_config_supported &&
- !mpc->funcs->mcm.is_config_supported(width))
- break;
+ /* NOTE: Toggling from DMA->Host is not supported atomically as hardware
+ * blocks writes until 3DLUT FL mode is cleared from HUBP on VUpdate.
+ * Expectation is either option is used consistently.
+ */
- if (mpc->funcs->program_lut_read_write_control)
- mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
+ /* 3DLUT */
+ lut_enable = cm->flags.bits.lut3d_enable;
+ if (lut_enable && cm->flags.bits.lut3d_dma_enable) {
+ /* Fast (DMA) Load Mode */
+ /* MPC */
if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
-
- if (hubp->funcs->hubp_program_3dlut_fl_addr)
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut_enable, lut_bank_a, cm->lut3d_dma.size, mpcc_id);
- if (mpc->funcs->mcm.program_bit_depth)
- mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+ /* only supports 12 bit */
+ if (mpc->funcs->program_lut_read_write_control)
+ mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, 12, mpcc_id);
- switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- mode = hubp_3dlut_fl_mode_native_1;
- addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- mode = hubp_3dlut_fl_mode_native_2;
- addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- mode = hubp_3dlut_fl_mode_transform;
- addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- mode = hubp_3dlut_fl_mode_disable;
- addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_mode)
- hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
+ if (mpc->funcs->update_3dlut_fast_load_select)
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
- if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
-
- switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_format)
- hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
- mpc->funcs->mcm.program_bias_scale) {
- mpc->funcs->mcm.program_bias_scale(mpc,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
- mpcc_id);
- hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
- }
-
- //navi 4x has a bug and r and blue are swapped and need to be worked around here in
- //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
- switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- default:
- crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
- }
+ /* HUBP */
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
- crossbar_bit_slice_cr_r,
- crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b);
-
- if (mpc->funcs->mcm.program_lut_read_write_control)
- mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
+ hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);
- if (mpc->funcs->mcm.program_3dlut_size)
- mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
-
- if (mpc->funcs->update_3dlut_fast_load_select)
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
+ if (hubp->funcs->hubp_program_3dlut_fl_addr)
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
- if (hubp->funcs->hubp_enable_3dlut_fl)
+ if (hubp->funcs->hubp_enable_3dlut_fl) {
hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- else {
- if (mpc->funcs->program_lut_mode) {
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- }
+ } else {
+ /* GPU memory only supports fast load path */
+ BREAK_TO_DEBUGGER();
+ lut_enable = false;
+ result = false;
}
- break;
+ } else {
+ /* Legacy (Host) Load Mode */
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
- }
-}
+ if (cm->flags.bits.lut3d_enable && cm->lut3d_func.state.bits.initialized) {
+ m_lut_params.lut3d = &cm->lut3d_func.lut_3d;
+ } else {
+ lut_enable = false;
+ }
-void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
-{
- struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ /* MPC */
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc,
+ MCM_LUT_3DLUT,
+ lut_enable,
+ lut_bank_a,
+ cm->lut3d_func.lut_3d.use_tetrahedral_9 ? CM_LUT_SIZE_999 : CM_LUT_SIZE_171717,
+ mpcc_id);
+
+ if (lut_enable) {
+ if (mpc->funcs->program_lut_read_write_control)
+ mpc->funcs->program_lut_read_write_control(mpc,
+ MCM_LUT_3DLUT,
+ lut_bank_a,
+ cm->lut3d_func.lut_3d.use_12bits ? 12 : 10,
+ mpcc_id);
- if (hubp->funcs->hubp_enable_3dlut_fl) {
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- }
-}
+ if (mpc->funcs->update_3dlut_fast_load_select)
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xf);
-bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
-{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
- struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
- struct mpc *mpc = dc->res_pool->mpc;
- bool result;
- const struct pwl_params *lut_params = NULL;
- bool rval;
+ if (mpc->funcs->populate_lut)
+ mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, &m_lut_params, lut_bank_a, mpcc_id);
+ }
- if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
- return true;
- }
+ /* HUBP */
+ memset(&lut3d_dma, 0, sizeof(lut3d_dma));
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &lut3d_dma);
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
- // 1D LUT
- if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf.pwl;
- else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
- rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = rval ? &dpp_base->regamma_params : NULL;
- }
- result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
- lut_params = NULL;
-
- // Shaper
- if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func.pwl;
- else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = rval ? &dpp_base->shaper_params : NULL;
- }
- result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
-
- // 3D
- if (mpc->funcs->program_3dlut) {
- if (plane_state->lut3d_func.state.bits.initialized == 1)
- result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
- else
- result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
+ if (hubp->funcs->hubp_enable_3dlut_fl)
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
}
return result;
@@ -982,6 +865,8 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
}
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
@@ -1930,10 +1815,9 @@ void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
- if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
- == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
- && mpc_pipe->plane_state->mcm_shaper_3dlut_setting
- == DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
+ if (mpc_pipe->plane_state &&
+ mpc_pipe->plane_state->cm.flags.bits.lut3d_enable &&
+ mpc_pipe->plane_state->cm.flags.bits.lut3d_dma_enable) {
wa_pipes[wa_pipe_ct++] = mpc_pipe;
}
}
@@ -1982,7 +1866,6 @@ void dcn401_reset_back_end_for_pipe(
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
@@ -2061,6 +1944,22 @@ void dcn401_reset_back_end_for_pipe(
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
+static void dc_hwss_disable_otg_pwa(struct dc *dc)
+{
+ if (dc->debug.enable_otg_frame_sync_pwa) {
+ int i;
+
+ /*reset all the otg*/
+ for (i = dc->res_pool->timing_generator_count - 1; i >= 0 ; i--) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->disable_otg_pwa) {
+ tg->funcs->disable_otg_pwa(tg);
+ DC_LOG_DC("otg frame sync pwa disabled on otg%d\n", tg->inst);
+ }
+ }
+ }
+}
void dcn401_reset_hw_ctx_wrap(
struct dc *dc,
@@ -2069,6 +1968,7 @@ void dcn401_reset_hw_ctx_wrap(
int i;
struct dce_hwseq *hws = dc->hwseq;
+ dc_hwss_disable_otg_pwa(dc);
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
struct pipe_ctx *pipe_ctx_old =
@@ -2259,6 +2159,10 @@ void dcn401_program_pipe(
pipe_ctx->stream_res.test_pattern_params.height,
pipe_ctx->stream_res.test_pattern_params.offset);
}
+ if (pipe_ctx->plane_state
+ && pipe_ctx->plane_state->update_flags.bits.cm_hist_change
+ && hws->funcs.program_cm_hist)
+ hws->funcs.program_cm_hist(dc, pipe_ctx, pipe_ctx->plane_state);
}
/*
@@ -2410,6 +2314,13 @@ void dcn401_program_pipe_sequence(
pipe_ctx->stream_res.test_pattern_params.offset);
}
+ if (pipe_ctx->plane_state
+ && pipe_ctx->plane_state->update_flags.bits.cm_hist_change
+ && hws->funcs.program_cm_hist) {
+
+ hwss_add_dpp_program_cm_hist(seq_state, pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_state->cm_hist_control, pipe_ctx->plane_state->color_space);
+ }
}
void dcn401_program_front_end_for_ctx(
@@ -2422,8 +2333,6 @@ void dcn401_program_front_end_for_ctx(
struct dce_hwseq *hws = dc->hwseq;
struct pipe_ctx *pipe = NULL;
- DC_LOGGER_INIT(dc->ctx->logger);
-
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
@@ -2587,8 +2496,6 @@ void dcn401_post_unlock_program_front_end(
struct dce_hwseq *hwseq = dc->hwseq;
int i;
- DC_LOGGER_INIT(dc->ctx->logger);
-
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
@@ -2968,8 +2875,6 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
struct dce_hwseq *hws = dc->hwseq;
uint32_t org_ip_request_cntl = 0;
- DC_LOGGER_INIT(dc->ctx->logger);
-
if (REG(DC_IP_REQUEST_CNTL)) {
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
@@ -3061,8 +2966,6 @@ void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
struct dce_hwseq *hws = dc->hwseq;
uint32_t org_ip_request_cntl = 0;
- DC_LOGGER_INIT(dc->ctx->logger);
-
/* Check and set DC_IP_REQUEST_CNTL if needed */
if (REG(DC_IP_REQUEST_CNTL)) {
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
index f0e1ed0f2949..8e12dc1297c4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
@@ -261,7 +261,7 @@ void dcn42_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
- dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);
+ dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->clk_mgr->bw_params->num_channels, dc->config.sdpif_request_limit_words_per_umc);
// Get DMCUB capabilities
if (dc->ctx->dmub_srv) {
@@ -275,8 +275,7 @@ void dcn42_init_hw(struct dc *dc)
|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
if (dc->clk_mgr)
- dc->res_pool->funcs->update_bw_bounding_box(dc,
- dc->clk_mgr->bw_params);
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
}
}
if (dc->res_pool->pg_cntl) {
@@ -383,68 +382,6 @@ void dcn42_program_cm_hist(
plane_state->cm_hist_control, plane_state->color_space);
}
-static void dc_get_lut_xbar(
- enum dc_cm2_gpu_mem_pixel_component_order order,
- enum hubp_3dlut_fl_crossbar_bit_slice *cr_r,
- enum hubp_3dlut_fl_crossbar_bit_slice *y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice *cb_b)
-{
- switch (order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- break;
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
- }
-}
-
-static void dc_get_lut_mode(
- enum dc_cm2_gpu_mem_layout layout,
- enum hubp_3dlut_fl_mode *mode,
- enum hubp_3dlut_fl_addressing_mode *addr_mode)
-{
- switch (layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- *mode = hubp_3dlut_fl_mode_native_1;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- *mode = hubp_3dlut_fl_mode_native_2;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- *mode = hubp_3dlut_fl_mode_transform;
- *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- *mode = hubp_3dlut_fl_mode_disable;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
-}
-
-static void dc_get_lut_format(
- enum dc_cm2_gpu_mem_format dc_format,
- enum hubp_3dlut_fl_format *format)
-{
- switch (dc_format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- *format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- *format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
-}
-
static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
{
if (mpc->funcs->rmcm.power_on_shaper_3dlut &&
@@ -455,119 +392,17 @@ static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
return false;
}
-static bool is_rmcm_3dlut_fl_supported(struct dc *dc, enum dc_cm2_gpu_mem_size size)
-{
- if (!dc->caps.color.mpc.rmcm_3d_lut_caps.dma_3d_lut)
- return false;
- if (size == DC_CM2_GPU_MEM_SIZE_171717)
- return (dc->caps.color.mpc.rmcm_3d_lut_caps.lut_dim_caps.dim_17);
- else if (size == DC_CM2_GPU_MEM_SIZE_333333)
- return (dc->caps.color.mpc.rmcm_3d_lut_caps.lut_dim_caps.dim_33);
- return false;
-}
-
-static void dcn42_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend)
-{
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
-
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = (bPostBlend) ?
- MPCC_MOVABLE_CM_LOCATION_AFTER :
- MPCC_MOVABLE_CM_LOCATION_BEFORE;
-}
-
-static void dcn42_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
- enum MCM_LUT_XABLE *shaper_xable,
- enum MCM_LUT_XABLE *lut3d_xable,
- enum MCM_LUT_XABLE *lut1d_xable)
-{
- enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
- bool lut1d_enable = false;
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
- shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
- lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
-
- *lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
-
- switch (shaper_3dlut_setting) {
- case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
- *lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
- break;
- case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
- *lut3d_xable = MCM_LUT_DISABLE;
- *shaper_xable = MCM_LUT_ENABLE;
- break;
- case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
- *lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
- break;
- }
-}
-
-static void fl_get_lut_mode(
- enum dc_cm2_gpu_mem_layout layout,
- enum dc_cm2_gpu_mem_size size,
- enum hubp_3dlut_fl_mode *mode,
- enum hubp_3dlut_fl_addressing_mode *addr_mode,
- enum hubp_3dlut_fl_width *width)
-{
- *width = hubp_3dlut_fl_width_17;
-
- if (size == DC_CM2_GPU_MEM_SIZE_333333)
- *width = hubp_3dlut_fl_width_33;
-
- switch (layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- *mode = hubp_3dlut_fl_mode_native_1;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- *mode = hubp_3dlut_fl_mode_native_2;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- *mode = hubp_3dlut_fl_mode_transform;
- *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- *mode = hubp_3dlut_fl_mode_disable;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
-}
-
bool dcn42_program_rmcm_luts(
struct hubp *hubp,
struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
+ const struct dc_plane_cm *cm,
struct mpc *mpc,
- bool lut_bank_a,
int mpcc_id)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
union mcm_lut_params m_lut_params = {0};
- enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_format format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- enum hubp_3dlut_fl_width width = hubp_3dlut_fl_width_17;
-
struct dc *dc = hubp->ctx->dc;
- struct hubp_fl_3dlut_config fl_config;
struct mpc_fl_3dlut_config mpc_fl_config;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -575,25 +410,23 @@ bool dcn42_program_rmcm_luts(
// true->false when it can be allocated at DI time
struct dc_rmcm_3dlut *rmcm_3dlut = dc_stream_get_3dlut_for_stream(dc, stream, false);
+ bool lut_bank_a = true; // TODO get from HW
+
//check to see current pipe is part of a stream with allocated rmcm 3dlut
if (!rmcm_3dlut)
return false;
- rmcm_3dlut->protection_bits = mcm_luts->lut3d_data.rmcm_tmz;
-
- dcn42_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
-
/* Shaper */
- if (mcm_luts->shaper) {
+ if (cm->flags.bits.shaper_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts->shaper->type == TF_TYPE_HWPWL) {
- m_lut_params.pwl = &mcm_luts->shaper->pwl;
- } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ if (cm->shaper_func.type == TF_TYPE_HWPWL) {
+ m_lut_params.pwl = &cm->shaper_func.pwl;
+ } else if (cm->shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
ASSERT(false);
cm_helper_translate_curve_to_hw_format(
dc->ctx,
- mcm_luts->shaper,
+ &cm->shaper_func,
&dpp_base->shaper_params, true);
m_lut_params.pwl = &dpp_base->shaper_params;
}
@@ -609,58 +442,21 @@ bool dcn42_program_rmcm_luts(
}
/* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
+ if (!cm->flags.bits.lut3d_dma_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
// Don't know what to do in this case.
- //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- fl_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout,
- mcm_luts->lut3d_data.gpu_mem_params.size,
- &mode,
- &addr_mode,
- &width);
-
- if (!dc_is_rmcm_3dlut_supported(hubp, mpc) ||
- !mpc->funcs->rmcm.is_config_supported(
- (width == hubp_3dlut_fl_width_17 ||
- width == hubp_3dlut_fl_width_transformed) ? 17 : 33))
+ } else {
+ if (!dc_is_rmcm_3dlut_supported(hubp, mpc))
return false;
- // setting native or transformed mode,
- dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
-
//seems to be only for the MCM
- dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format);
-
- dc_get_lut_xbar(
- mcm_luts->lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
-
- fl_config.mode = mode;
- fl_config.enabled = lut3d_xable != MCM_LUT_DISABLE;
- fl_config.address = mcm_luts->lut3d_data.gpu_mem_params.addr;
- fl_config.format = format;
- fl_config.crossbar_bit_slice_y_g = crossbar_bit_slice_y_g;
- fl_config.crossbar_bit_slice_cb_b = crossbar_bit_slice_cb_b;
- fl_config.crossbar_bit_slice_cr_r = crossbar_bit_slice_cr_r;
- fl_config.width = width;
- fl_config.protection_bits = rmcm_3dlut->protection_bits;
- fl_config.addr_mode = addr_mode;
- fl_config.layout = mcm_luts->lut3d_data.gpu_mem_params.layout;
- fl_config.bias = mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias;
- fl_config.scale = mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale;
-
- mpc_fl_config.enabled = fl_config.enabled;
- mpc_fl_config.width = width;
+ mpc_fl_config.enabled = cm->flags.bits.lut3d_enable;
+ mpc_fl_config.size = cm->lut3d_dma.size;
mpc_fl_config.select_lut_bank_a = lut_bank_a;
- mpc_fl_config.bit_depth = mcm_luts->lut3d_data.gpu_mem_params.bit_depth;
+ mpc_fl_config.bit_depth = 0;
mpc_fl_config.hubp_index = hubp->inst;
- mpc_fl_config.bias = mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias;
- mpc_fl_config.scale = mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale;
+ mpc_fl_config.bias = cm->lut3d_dma.bias;
+ mpc_fl_config.scale = cm->lut3d_dma.scale;
//1. power down the block
mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false);
@@ -668,268 +464,44 @@ bool dcn42_program_rmcm_luts(
//2. program RMCM - 3dlut reg programming
mpc->funcs->rmcm.fl_3dlut_configure(mpc, &mpc_fl_config, mpcc_id);
- hubp->funcs->hubp_program_3dlut_fl_config(hubp, &fl_config);
+ /* HUBP */
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
+
+ if (hubp->funcs->hubp_program_3dlut_fl_addr)
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
//3. power on the block
mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);
-
- break;
- default:
- return false;
}
return true;
}
-void dcn42_populate_mcm_luts(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_cm2_func_luts mcm_luts,
- bool lut_bank_a)
+bool dcn42_set_mcm_luts(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- int mpcc_id = hubp->inst;
+ const struct dc_plane_cm *cm = &plane_state->cm;
struct mpc *mpc = dc->res_pool->mpc;
- union mcm_lut_params m_lut_params;
- enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format = 0;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width = 0;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
- enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
- enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
- enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
- bool rval;
-
- dcn42_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
-
- //MCM - setting its location (Before/After) blender
- //set to post blend (true)
- dcn42_set_mcm_location_post_blend(
- dc,
- pipe_ctx,
- mcm_luts.lut3d_data.mpc_mcm_post_blend);
-
- //RMCM - 3dLUT+Shaper
- if (mcm_luts.lut3d_data.rmcm_3dlut_enable &&
- is_rmcm_3dlut_fl_supported(dc, mcm_luts.lut3d_data.gpu_mem_params.size)) {
+ int mpcc_id = hubp->inst;
+ bool result;
+
+ /* MCM */
+ result = dcn401_set_mcm_luts(pipe_ctx, plane_state);
+
+ /* RMCM */
+ if (cm->flags.bits.rmcm_enable && cm->flags.bits.lut3d_dma_enable) {
+ /* TODO - move RMCM to its own block */
dcn42_program_rmcm_luts(
hubp,
pipe_ctx,
- lut3d_src,
- &mcm_luts,
+ cm,
mpc,
- lut_bank_a,
mpcc_id);
}
- /* 1D LUT */
- if (mcm_luts.lut1d_func) {
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
- m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
- else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
- mcm_luts.lut1d_func,
- &dpp_base->regamma_params, false);
- m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
- }
- if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
- }
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
- }
-
- /* Shaper */
- if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
- m_lut_params.pwl = &mcm_luts.shaper->pwl;
- else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- ASSERT(false);
- rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
- mcm_luts.shaper,
- &dpp_base->regamma_params, true);
- m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
- }
- if (m_lut_params.pwl) {
- if (mpc->funcs->mcm.populate_lut)
- mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
- }
- }
-
- /* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (hubp->funcs->hubp_enable_3dlut_fl)
- hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
-
- if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
- m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
- mpcc_id);
- }
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_333333:
- width = hubp_3dlut_fl_width_33;
- break;
- case DC_CM2_GPU_MEM_SIZE_171717:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- default:
- //TODO: Handle default case
- break;
- }
-
- //check for support
- if (mpc->funcs->mcm.is_config_supported &&
- !mpc->funcs->mcm.is_config_supported(width))
- break;
-
- if (mpc->funcs->program_lut_read_write_control)
- mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
-
- if (hubp->funcs->hubp_program_3dlut_fl_addr)
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
-
- if (mpc->funcs->mcm.program_bit_depth)
- mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
-
- dc_get_lut_mode(mcm_luts.lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
- if (hubp->funcs->hubp_program_3dlut_fl_mode)
- hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
-
- if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
-
- switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_format)
- hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
- mpc->funcs->mcm.program_bias_scale) {
- mpc->funcs->mcm.program_bias_scale(mpc,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
- mpcc_id);
- hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
- }
-
- //navi 4x has a bug and r and blue are swapped and need to be worked around here in
- //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
- dc_get_lut_xbar(
- mcm_luts.lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
-
- if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
- crossbar_bit_slice_cr_r,
- crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b);
-
- if (mpc->funcs->mcm.program_lut_read_write_control)
- mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
-
- if (mpc->funcs->mcm.program_3dlut_size)
- mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
-
- if (mpc->funcs->update_3dlut_fast_load_select)
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
-
- if (hubp->funcs->hubp_enable_3dlut_fl)
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- else {
- if (mpc->funcs->program_lut_mode) {
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- }
- }
- break;
- }
-}
-
-bool dcn42_set_mcm_luts(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
-{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
- struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
- struct mpc *mpc = dc->res_pool->mpc;
- bool result;
- const struct pwl_params *lut_params = NULL;
- bool rval;
-
- if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- dcn42_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
- return true;
- }
-
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
- // 1D LUT
- if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf.pwl;
- else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
- rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = rval ? &dpp_base->regamma_params : NULL;
- }
- result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
- lut_params = NULL;
-
- // Shaper
- if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func.pwl;
- else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = rval ? &dpp_base->shaper_params : NULL;
- }
- result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
-
- // 3D
- if (mpc->funcs->program_3dlut) {
- if (plane_state->lut3d_func.state.bits.initialized == 1)
- result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
- else
- result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
- }
-
return result;
}
void dcn42_hardware_release(struct dc *dc)
@@ -1050,7 +622,7 @@ void dcn42_prepare_bandwidth(
dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
- dcn20_prepare_bandwidth(dc, context);
+ dcn401_prepare_bandwidth(dc, context);
}
void dcn42_optimize_bandwidth(struct dc *dc, struct dc_state *context)
@@ -1470,3 +1042,50 @@ void dcn42_dmub_hw_control_lock_fast(union block_sequence_params *params)
dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}
}
+
+/* In headless boot cases, DIG may be turned
+ * on which causes HW/SW discrepancies.
+ * To avoid this, power down hardware on boot
+ * if DIG is turned on
+ */
+void dcn42_power_down_on_boot(struct dc *dc)
+{
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ struct dc_link *edp_link = NULL;
+ int edp_num;
+ int i = 0;
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ if (edp_num)
+ edp_link = edp_links[0];
+
+ if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwseq->funcs.edp_backlight_control &&
+ dc->hwseq->funcs.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwseq->funcs.edp_backlight_control(edp_link, false);
+ dc->hwseq->funcs.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwseq->funcs.power_down) {
+ dc->hwseq->funcs.power_down(dc);
+ break;
+ }
+
+ }
+ }
+
+ /*
+ * Call update_clocks with empty context
+ * to send DISPLAY_OFF
+ * Otherwise DISPLAY_OFF may not be asserted
+ */
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h
index 89ebb6520eaf..c4cfeed45b19 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h
@@ -18,18 +18,11 @@ void dcn42_program_cm_hist(
bool dcn42_set_mcm_luts(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
-void dcn42_populate_mcm_luts(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_cm2_func_luts mcm_luts,
- bool lut_bank_a);
-
bool dcn42_program_rmcm_luts(
struct hubp *hubp,
struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
+ const struct dc_plane_cm *cm,
struct mpc *mpc,
- bool lut_bank_a,
int mpcc_id);
void dcn42_hardware_release(struct dc *dc);
@@ -50,4 +43,5 @@ void dcn42_root_clock_control(struct dc *dc,
void dcn42_dmub_hw_control_lock(struct dc *dc, struct dc_state *context, bool lock);
void dcn42_dmub_hw_control_lock_fast(union block_sequence_params *params);
void dcn42_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc);
+void dcn42_power_down_on_boot(struct dc *dc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c
index a8e2f59d5e50..b324a2195e8a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c
@@ -19,7 +19,7 @@ static const struct hw_sequencer_funcs dcn42_funcs = {
.program_gamut_remap = dcn401_program_gamut_remap,
.init_hw = dcn42_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
- .power_down_on_boot = dcn35_power_down_on_boot,
+ .power_down_on_boot = dcn42_power_down_on_boot,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn401_program_front_end_for_ctx,
.clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
@@ -64,6 +64,12 @@ static const struct hw_sequencer_funcs dcn42_funcs = {
.set_cursor_position = dcn401_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 91eba1985bab..21224fd6b36d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -215,7 +215,7 @@ struct clk_state_registers_and_bypass {
uint32_t dcfclk_bypass;
uint32_t dprefclk_bypass;
uint32_t dispclk_bypass;
- uint32_t timer_threhold;
+ uint32_t timer_threshold;
};
struct rv1_clk_internal {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 9e53eacee3f8..c69ccfcebeb5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -243,7 +243,23 @@ enum dentist_divider_range {
CLK_SR_DCN42(CLK8_CLK3_DS_CNTL), \
CLK_SR_DCN42(CLK8_CLK4_DS_CNTL)
-#define CLK_COMMON_MASK_SH_LIST_DCN42(mask_sh) 0
+#define CLK_COMMON_MASK_SH_LIST_DCN42(mask_sh) \
+ CLK_SF(CLK8_CLK_TICK_CNT_CONFIG_REG, TIMER_THRESHOLD, mask_sh), \
+ CLK_SF(CLK8_CLK0_BYPASS_CNTL, CLK0_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK1_BYPASS_CNTL, CLK1_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK3_BYPASS_CNTL, CLK3_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK4_BYPASS_CNTL, CLK4_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK0_DS_CNTL, CLK0_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK1_DS_CNTL, CLK1_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK2_DS_CNTL, CLK2_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK3_DS_CNTL, CLK3_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK4_DS_CNTL, CLK4_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK0_DS_CNTL, CLK0_ALLOW_DS, mask_sh), \
+ CLK_SF(CLK8_CLK1_DS_CNTL, CLK1_ALLOW_DS, mask_sh), \
+ CLK_SF(CLK8_CLK2_DS_CNTL, CLK2_ALLOW_DS, mask_sh), \
+ CLK_SF(CLK8_CLK3_DS_CNTL, CLK3_ALLOW_DS, mask_sh), \
+ CLK_SF(CLK8_CLK4_DS_CNTL, CLK4_ALLOW_DS, mask_sh), \
@@ -259,6 +275,42 @@ enum dentist_divider_range {
type FbMult_int; \
type FbMult_frac;
+#define CLK42_REG_LIST(clkip_num, type) \
+ type CLK ## clkip_num ## _CLK_TICK_CNT_CONFIG_REG; \
+ type CLK ## clkip_num ## _CLK0_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK1_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK2_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK3_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK4_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK0_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK1_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK2_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK3_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK4_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK0_DS_CNTL; \
+ type CLK ## clkip_num ## _CLK1_DS_CNTL; \
+ type CLK ## clkip_num ## _CLK2_DS_CNTL; \
+ type CLK ## clkip_num ## _CLK3_DS_CNTL; \
+ type CLK ## clkip_num ## _CLK4_DS_CNTL;
+
+#define CLK42_REG_FIELD_LIST(type) \
+ type TIMER_THRESHOLD; \
+ type CLK0_BYPASS_SEL; \
+ type CLK1_BYPASS_SEL; \
+ type CLK2_BYPASS_SEL; \
+ type CLK3_BYPASS_SEL; \
+ type CLK4_BYPASS_SEL; \
+ type CLK0_DS_DIV_ID; \
+ type CLK1_DS_DIV_ID; \
+ type CLK2_DS_DIV_ID; \
+ type CLK3_DS_DIV_ID; \
+ type CLK4_DS_DIV_ID; \
+ type CLK0_ALLOW_DS; \
+ type CLK1_ALLOW_DS; \
+ type CLK2_ALLOW_DS; \
+ type CLK3_ALLOW_DS; \
+ type CLK4_ALLOW_DS;
+
/*
***************************************************************************************
****************** Clock Manager Private Structures ***********************************
@@ -322,32 +374,19 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK5_ALLOW_DS;
uint32_t CLK5_spll_field_8;
uint32_t CLK6_spll_field_8;
- uint32_t CLK8_CLK0_CURRENT_CNT;
- uint32_t CLK8_CLK1_CURRENT_CNT;
- uint32_t CLK8_CLK2_CURRENT_CNT;
- uint32_t CLK8_CLK3_CURRENT_CNT;
- uint32_t CLK8_CLK4_CURRENT_CNT;
- uint32_t CLK8_CLK0_DS_CNTL;
- uint32_t CLK8_CLK1_DS_CNTL;
- uint32_t CLK8_CLK2_DS_CNTL;
- uint32_t CLK8_CLK3_DS_CNTL;
- uint32_t CLK8_CLK4_DS_CNTL;
- uint32_t CLK8_CLK0_BYPASS_CNTL;
- uint32_t CLK8_CLK1_BYPASS_CNTL;
- uint32_t CLK8_CLK2_BYPASS_CNTL;
- uint32_t CLK8_CLK3_BYPASS_CNTL;
- uint32_t CLK8_CLK4_BYPASS_CNTL;
- uint32_t CLK8_CLK_TICK_CNT_CONFIG_REG;
+ CLK42_REG_LIST(8, uint32_t)
};
struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
CLK20_REG_FIELD_LIST(uint8_t)
+ CLK42_REG_FIELD_LIST(uint8_t)
};
struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
CLK20_REG_FIELD_LIST(uint32_t)
+ CLK42_REG_FIELD_LIST(uint32_t)
};
enum clock_type {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index a79019365af8..2a5a81d15950 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -89,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode {
enum hubp_3dlut_fl_width {
hubp_3dlut_fl_width_17 = 17,
hubp_3dlut_fl_width_33 = 33,
- hubp_3dlut_fl_width_transformed = 4916, //mpc default
+ hubp_3dlut_fl_width_17_transformed = 4916, //mpc default
};
enum hubp_3dlut_fl_crossbar_bit_slice {
@@ -99,22 +99,6 @@ enum hubp_3dlut_fl_crossbar_bit_slice {
hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
};
-struct hubp_fl_3dlut_config {
- bool enabled;
- enum hubp_3dlut_fl_width width;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_format format;
- uint16_t bias;
- uint16_t scale;
- struct dc_plane_address address;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum dc_cm2_gpu_mem_layout layout;
- uint8_t protection_bits;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
-};
-
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -243,7 +227,6 @@ struct hubp_funcs {
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
void (*hubp_init)(struct hubp *hubp);
-
void (*dmdata_set_attributes)(
struct hubp *hubp,
const struct dc_dmdata_attributes *attr);
@@ -290,24 +273,15 @@ struct hubp_funcs {
void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
void (*hubp_program_mcache_id_and_split_coordinate)(struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs);
- void (*hubp_update_3dlut_fl_bias_scale)(struct hubp *hubp, uint16_t bias, uint16_t scale);
- void (*hubp_program_3dlut_fl_mode)(struct hubp *hubp,
- enum hubp_3dlut_fl_mode mode);
- void (*hubp_program_3dlut_fl_format)(struct hubp *hubp,
- enum hubp_3dlut_fl_format format);
void (*hubp_program_3dlut_fl_addr)(struct hubp *hubp,
- const struct dc_plane_address address);
+ const struct dc_plane_address *address);
+ void (*hubp_program_3dlut_fl_config)(struct hubp *hubp,
+ const struct dc_3dlut_dma *config);
void (*hubp_program_3dlut_fl_dlg_param)(struct hubp *hubp, int refcyc_per_3dlut_group);
void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
- void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
- void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
- void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits);
void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
+ enum dc_cm_lut_pixel_format format);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
- void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg);
void (*hubp_clear_tiling)(struct hubp *hubp);
uint32_t (*hubp_get_current_read_line)(struct hubp *hubp);
uint32_t (*hubp_get_det_config_error)(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index a61d12ec61bc..b152f6879495 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -152,6 +152,13 @@ struct dc_rgb {
uint32_t blue;
};
+struct tetrahedral_33x33x33 {
+ struct dc_rgb lut0[8985];
+ struct dc_rgb lut1[8984];
+ struct dc_rgb lut2[8984];
+ struct dc_rgb lut3[8984];
+};
+
struct tetrahedral_17x17x17 {
struct dc_rgb lut0[1229];
struct dc_rgb lut1[1228];
@@ -165,14 +172,23 @@ struct tetrahedral_9x9x9 {
struct dc_rgb lut3[182];
};
+enum lut_dimension {
+ LUT_DIM_INVALID = 0,
+ LUT_DIM_9 = 9,
+ LUT_DIM_17 = 17,
+ LUT_DIM_33 = 33,
+};
+
struct tetrahedral_params {
union {
+//TODO: Uncomment when in use.
+// struct tetrahedral_33x33x33 tetrahedral_33;
struct tetrahedral_17x17x17 tetrahedral_17;
struct tetrahedral_9x9x9 tetrahedral_9;
};
bool use_tetrahedral_9;
bool use_12bits;
-
+ enum lut_dimension lut_dim;
};
/* arr_curve_points - regamma regions/segments specification
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 0db607f2a410..f5617674bea8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -54,6 +54,7 @@
#include "dc_hw_types.h"
#include "hw_shared.h"
#include "transform.h"
+#include "dc_types.h"
#define MAX_MPCC 6
#define MAX_OPP 6
@@ -66,7 +67,6 @@ enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_COEF_B
};
-
enum mpcc_blend_mode {
MPCC_BLEND_MODE_BYPASS,
MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH,
@@ -102,13 +102,6 @@ enum mpcc_movable_cm_location {
MPCC_MOVABLE_CM_LOCATION_AFTER,
};
-enum MCM_LUT_XABLE {
- MCM_LUT_DISABLE,
- MCM_LUT_DISABLED = MCM_LUT_DISABLE,
- MCM_LUT_ENABLE,
- MCM_LUT_ENABLED = MCM_LUT_ENABLE,
-};
-
enum MCM_LUT_ID {
MCM_LUT_3DLUT,
MCM_LUT_1DLUT,
@@ -117,7 +110,7 @@ enum MCM_LUT_ID {
struct mpc_fl_3dlut_config {
bool enabled;
- uint16_t width;
+ enum dc_cm_lut_size size;
bool select_lut_bank_a;
uint16_t bit_depth;
int hubp_index;
@@ -1042,22 +1035,22 @@ struct mpc_funcs {
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
-/**
- * @get_3dlut_fast_load_status:
- *
- * Get 3D LUT fast load status and reference them with done, soft_underflow and hard_underflow pointers.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] mpcc_id
- * - [in/out] done
- * - [in/out] soft_underflow
- * - [in/out] hard_underflow
- *
- * Return:
- *
- * void
- */
+ /**
+ * @get_3dlut_fast_load_status:
+ *
+ * Get 3D LUT fast load status and reference them with done, soft_underflow and hard_underflow pointers.
+ *
+ * Parameters:
+ * - [in/out] mpc - MPC context.
+ * - [in] mpcc_id
+ * - [in/out] done
+ * - [in/out] soft_underflow
+ * - [in/out] hard_underflow
+ *
+ * Return:
+ *
+ * void
+ */
void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
/**
@@ -1076,8 +1069,11 @@ struct mpc_funcs {
*
* void
*/
- void (*populate_lut)(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params,
- bool lut_bank_a, int mpcc_id);
+ void (*populate_lut)(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const union mcm_lut_params *params,
+ const bool lut_bank_a,
+ const int mpcc_id);
/**
* @program_lut_read_write_control:
@@ -1088,13 +1084,18 @@ struct mpc_funcs {
* - [in/out] mpc - MPC context.
* - [in] id
* - [in] lut_bank_a
+ * - [in] bit_depth
* - [in] mpcc_id
*
* Return:
*
* void
*/
- void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id, bool lut_bank_a, int mpcc_id);
+ void (*program_lut_read_write_control)(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const bool lut_bank_a,
+ const unsigned int bit_depth,
+ const int mpcc_id);
/**
* @program_lut_mode:
@@ -1104,33 +1105,44 @@ struct mpc_funcs {
* Parameters:
* - [in/out] mpc - MPC context.
* - [in] id
- * - [in] xable
+ * - [in] enable
* - [in] lut_bank_a
+ * - [in] size
* - [in] mpcc_id
*
* Return:
*
* void
*/
- void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
- bool lut_bank_a, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const bool enable,
+ const bool lut_bank_a,
+ const enum dc_cm_lut_size size,
+ const int mpcc_id);
- /**
- * @mcm:
- *
- * MPC MCM new HW sequential programming functions
- */
- struct {
- void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
- void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
- void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
- bool (*is_config_supported)(uint32_t width);
- void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
- bool lut_bank_a, bool enabled, int mpcc_id);
- void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
- bool lut_bank_a, int mpcc_id);
- } mcm;
+ /**
+ * @get_lut_mode:
+ *
+ * Obtains enablement and ram bank status.
+ *
+ * Parameters:
+ * - [in/out] mpc - MPC context.
+ * - [in] id
+ * - [in] mpcc_id
+ * - [out] enable
+ * - [out] lut_bank_a
+ *
+ * Return:
+ *
+ * void
+ */
+ void (*get_lut_mode)(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const int mpcc_id,
+ bool *enable,
+ bool *lut_bank_a);
/**
* @rmcm:
@@ -1143,9 +1155,11 @@ struct mpc_funcs {
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
bool lut_bank_a, bool enabled, int mpcc_id);
- void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
- bool lut_bank_a, int mpcc_id);
- void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc,
+ bool enable,
+ bool lut_bank_a,
+ int mpcc_id);
+ void (*program_3dlut_size)(struct mpc *mpc, const enum dc_cm_lut_size size, int mpcc_id);
void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
bool (*is_config_supported)(uint32_t width);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 79746d931471..cecd3282a29f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -60,6 +60,7 @@ struct resource_caps {
int num_hpo_dp_stream_encoder;
int num_hpo_dp_link_encoder;
int num_mpc_3dlut;
+ int num_rmcm;
};
struct resource_straps {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index d0bb26888f4b..f992c2d16748 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -352,7 +352,7 @@ static void query_dp_dual_mode_adaptor(
*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ CONN_DATA_DETECT(link, type2_dongle_buf, sizeof(type2_dongle_buf),
"DP-DVI passive dongle %dMhz: ",
DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
return;
@@ -657,8 +657,6 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
unsigned long long time_taken_in_ns;
int tries_taken;
- DC_LOGGER_INIT(link->ctx->logger);
-
/**
* this function will only exist if we are on dcn21 (is_in_alt_mode is a
* function pointer, so checking to see if it is equal to 0 is the same
@@ -729,8 +727,6 @@ static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason)
{
- DC_LOGGER_INIT(link->ctx->logger);
-
LINK_INFO("link=%d, mst branch is now Connected\n",
link->link_index);
@@ -750,8 +746,6 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
bool link_reset_cur_dp_mst_topology(struct dc_link *link)
{
- DC_LOGGER_INIT(link->ctx->logger);
-
LINK_INFO("link=%d, mst branch is now Disconnected\n",
link->link_index);
@@ -977,8 +971,6 @@ static bool detect_link_and_local_sink(struct dc_link *link,
enum dc_connection_type new_connection_type = dc_connection_none;
const uint32_t post_oui_delay = 30; // 30ms
- DC_LOGGER_INIT(link->ctx->logger);
-
if (dc_is_virtual_signal(link->connector_signal))
return false;
@@ -1459,8 +1451,6 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
bool is_delegated_to_mst_top_mgr = false;
enum dc_connection_type pre_link_type = link->type;
- DC_LOGGER_INIT(link->ctx->logger);
-
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
if (is_local_sink_detect_success && link->local_sink)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index cc18a3bebef2..1860d44f63c1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -223,9 +223,10 @@ static void handle_hpd_irq_vesa_replay_sink(struct dc_link *link)
}
}
-static void handle_hpd_irq_replay_sink(struct dc_link *link)
+static void handle_hpd_irq_replay_sink(struct dc_link *link, bool *need_re_enable)
{
union dpcd_replay_configuration replay_configuration = {0};
+ union dpcd_replay_configuration replay_sink_status = {0};
/*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
union psr_error_status replay_error_status = {0};
bool ret = false;
@@ -265,9 +266,17 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_error_status.raw,
sizeof(replay_error_status.raw));
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_PR_REPLAY_SINK_STATUS,
+ &replay_sink_status.raw,
+ 1);
+
if (replay_error_status.bits.LINK_CRC_ERROR ||
replay_configuration.bits.DESYNC_ERROR_STATUS ||
- replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) {
+ replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS ||
+ replay_sink_status.bits.SINK_DEVICE_REPLAY_STATUS == 0x7) {
bool allow_active;
link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
@@ -299,8 +308,7 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
if (link->replay_settings.replay_allow_active) {
allow_active = false;
edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
- allow_active = true;
- edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+ *need_re_enable = true;
}
}
}
@@ -460,6 +468,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
union device_service_irq device_service_clear = {0};
enum dc_status result;
bool status = false;
+ bool replay_re_enable_needed = false;
if (out_link_loss)
*out_link_loss = false;
@@ -519,7 +528,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
/* PSR-related error was detected and handled */
return true;
- handle_hpd_irq_replay_sink(link);
+ handle_hpd_irq_replay_sink(link, &replay_re_enable_needed);
/* If PSR-related error handled, Main link may be off,
* so do not handle as a normal sink status change interrupt.
@@ -538,16 +547,16 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
return false;
}
- /* For now we only handle 'Downstream port status' case.
+ /* Handle 'Downstream port status' case for all DP link types.
* If we got sink count changed it means
* Downstream port status changed,
* then DM should call DC to do the detection.
- * NOTE: Do not handle link loss on eDP since it is internal link
+ * NOTE: Now includes eDP link loss detection and retraining
*/
- if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
- dp_parse_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
+
+ if (dp_parse_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
/* Connectivity log: link loss */
CONN_DATA_LINK_LOSS(link,
hpd_irq_dpcd_data.raw,
@@ -576,6 +585,11 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
!= link->dpcd_sink_count)
status = true;
+ if (replay_re_enable_needed) {
+ bool allow_active = true;
+
+ edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+ }
/* reasons for HPD RX:
* 1. Link Loss - ie Re-train the Link
* 2. MST sideband message
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c
index 6661078c0241..96afce4ffbfa 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c
@@ -252,23 +252,24 @@ bool dp_pr_enable(struct dc_link *link, bool enable)
if (!dp_pr_get_panel_inst(dc, link, &panel_inst))
return false;
+ if (link->replay_settings.replay_allow_active == enable)
+ return true;
+
if (enable && !dc_is_embedded_signal(link->connector_signal))
dp_pr_set_static_screen_param(link);
- if (link->replay_settings.replay_allow_active != enable) {
- //for sending PR enable commands to DMUB
- memset(&cmd, 0, sizeof(cmd));
+ // for sending PR enable commands to DMUB
+ memset(&cmd, 0, sizeof(cmd));
- cmd.pr_enable.header.type = DMUB_CMD__PR;
- cmd.pr_enable.header.sub_type = DMUB_CMD__PR_ENABLE;
- cmd.pr_enable.header.payload_bytes = sizeof(struct dmub_cmd_pr_enable_data);
- cmd.pr_enable.data.panel_inst = panel_inst;
- cmd.pr_enable.data.enable = enable ? 1 : 0;
+ cmd.pr_enable.header.type = DMUB_CMD__PR;
+ cmd.pr_enable.header.sub_type = DMUB_CMD__PR_ENABLE;
+ cmd.pr_enable.header.payload_bytes = sizeof(struct dmub_cmd_pr_enable_data);
+ cmd.pr_enable.data.panel_inst = panel_inst;
+ cmd.pr_enable.data.enable = enable ? 1 : 0;
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
- dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ link->replay_settings.replay_allow_active = enable;
- link->replay_settings.replay_allow_active = enable;
- }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 5b2c1a4911cf..4a2699a374b7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -847,6 +847,7 @@ bool edp_setup_psr(struct dc_link *link,
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_11_0_1:
+ case AMDGPU_FAMILY_GC_11_5_4:
if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
break;
@@ -1094,8 +1095,6 @@ bool edp_send_replay_cmd(struct dc_link *link,
if (!replay)
return false;
- DC_LOGGER_INIT(link->ctx->logger);
-
if (dp_pr_get_panel_inst(dc, link, &panel_inst))
cmd_data->panel_inst = panel_inst;
else {
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
index b23c64004dd5..27e653234850 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
@@ -118,9 +118,7 @@ void mpc1_assert_idle_mpcc(struct mpc *mpc, int id)
struct mpcc *mpc1_get_mpcc(struct mpc *mpc, int mpcc_id)
{
- struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
-
- ASSERT(mpcc_id < mpc10->num_mpcc);
+ ASSERT(mpcc_id < TO_DCN10_MPC(mpc)->num_mpcc);
return &(mpc->mpcc_array[mpcc_id]);
}
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index ce1ee2062e41..e0617db2d0c1 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -73,56 +73,15 @@ void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_locati
}
}
-static enum dc_lut_mode get3dlut_config(
- struct mpc *mpc,
- bool *is_17x17x17,
- bool *is_12bits_color_channel,
- int mpcc_id)
-{
- uint32_t i_mode, i_enable_10bits, lut_size;
- enum dc_lut_mode mode;
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id],
- MPCC_MCM_3DLUT_MODE_CURRENT, &i_mode);
-
- REG_GET(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
- MPCC_MCM_3DLUT_30BIT_EN, &i_enable_10bits);
-
- switch (i_mode) {
- case 0:
- mode = LUT_BYPASS;
- break;
- case 1:
- mode = LUT_RAM_A;
- break;
- case 2:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- if (i_enable_10bits > 0)
- *is_12bits_color_channel = false;
- else
- *is_12bits_color_channel = true;
-
- REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, &lut_size);
-
- if (lut_size == 0)
- *is_17x17x17 = true;
- else
- *is_17x17x17 = false;
-
- return mode;
-}
-
-void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params, bool lut_bank_a, int mpcc_id)
+void mpc401_populate_lut(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const union mcm_lut_params *params,
+ const bool lut_bank_a,
+ const int mpcc_id)
{
const enum dc_lut_mode next_mode = lut_bank_a ? LUT_RAM_A : LUT_RAM_B;
- const struct pwl_params *lut1d = params.pwl;
- const struct pwl_params *lut_shaper = params.pwl;
+ const struct pwl_params *lut1d = params->pwl;
+ const struct pwl_params *lut_shaper = params->pwl;
bool is_17x17x17;
bool is_12bits_color_channel;
const struct dc_rgb *lut0;
@@ -131,7 +90,7 @@ void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union
const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
- const struct tetrahedral_params *lut3d = params.lut3d;
+ const struct tetrahedral_params *lut3d = params->lut3d;
switch (id) {
case MCM_LUT_1DLUT:
@@ -174,8 +133,6 @@ void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union
mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true);
- get3dlut_config(mpc, &is_17x17x17, &is_12bits_color_channel, mpcc_id);
-
is_17x17x17 = !lut3d->use_tetrahedral_9;
is_12bits_color_channel = lut3d->use_12bits;
if (is_17x17x17) {
@@ -198,8 +155,6 @@ void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union
sizeof(lut3d->tetrahedral_9.lut1[0]);
}
- mpc32_select_3dlut_ram(mpc, next_mode,
- is_12bits_color_channel, mpcc_id);
mpc32_select_3dlut_ram_mask(mpc, 0x1, mpcc_id);
if (is_12bits_color_channel)
mpc32_set3dlut_ram12(mpc, lut0, lut_size0, mpcc_id);
@@ -232,46 +187,69 @@ void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union
}
+static uint32_t mpc401_cm_lut_size_to_3dlut_size(const enum dc_cm_lut_size cm_size)
+{
+ uint32_t size = 0;
+
+ switch (cm_size) {
+ case CM_LUT_SIZE_999:
+ size = 1;
+ break;
+ case CM_LUT_SIZE_171717:
+ size = 0;
+ break;
+ default:
+ /* invalid LUT size */
+ ASSERT(false);
+ size = 0;
+ break;
+ }
+
+ return size;
+}
+
void mpc401_program_lut_mode(
struct mpc *mpc,
const enum MCM_LUT_ID id,
- const enum MCM_LUT_XABLE xable,
- bool lut_bank_a,
- int mpcc_id)
+ const bool enable,
+ const bool lut_bank_a,
+ const enum dc_cm_lut_size size,
+ const int mpcc_id)
{
+ uint32_t lut_size;
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
switch (id) {
case MCM_LUT_3DLUT:
- switch (xable) {
- case MCM_LUT_DISABLE:
+ if (enable) {
+ lut_size = mpc401_cm_lut_size_to_3dlut_size(size);
+ REG_UPDATE_2(MPCC_MCM_3DLUT_MODE[mpcc_id],
+ MPCC_MCM_3DLUT_MODE, lut_bank_a ? 1 : 2,
+ MPCC_MCM_3DLUT_SIZE, lut_size);
+ } else {
+ if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
+ mpc32_power_on_shaper_3dlut(mpc, mpcc_id, false);
REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_MODE, 0);
- break;
- case MCM_LUT_ENABLE:
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_MODE, lut_bank_a ? 1 : 2);
- break;
}
break;
case MCM_LUT_SHAPER:
- switch (xable) {
- case MCM_LUT_DISABLE:
- REG_UPDATE(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_LUT_MODE, 0);
- break;
- case MCM_LUT_ENABLE:
+ if (enable) {
REG_UPDATE(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_LUT_MODE, lut_bank_a ? 1 : 2);
- break;
+ } else {
+ if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
+ mpc32_power_on_shaper_3dlut(mpc, mpcc_id, false);
+ REG_UPDATE(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_LUT_MODE, 0);
}
break;
case MCM_LUT_1DLUT:
- switch (xable) {
- case MCM_LUT_DISABLE:
- REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
- MPCC_MCM_1DLUT_MODE, 0);
- break;
- case MCM_LUT_ENABLE:
+ if (enable) {
REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
MPCC_MCM_1DLUT_MODE, 2);
- break;
+ } else {
+ if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
+ mpc32_power_on_blnd_lut(mpc, mpcc_id, false);
+ REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
+ MPCC_MCM_1DLUT_MODE, 0);
}
REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
MPCC_MCM_1DLUT_SELECT, lut_bank_a ? 0 : 1);
@@ -279,14 +257,20 @@ void mpc401_program_lut_mode(
}
}
-void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_ID id, bool lut_bank_a, int mpcc_id)
+void mpc401_program_lut_read_write_control(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const bool lut_bank_a,
+ const unsigned int bit_depth,
+ const int mpcc_id)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
switch (id) {
case MCM_LUT_3DLUT:
mpc32_select_3dlut_ram_mask(mpc, 0xf, mpcc_id);
- REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id], MPCC_MCM_3DLUT_RAM_SEL, lut_bank_a ? 0 : 1);
+ REG_UPDATE_2(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
+ MPCC_MCM_3DLUT_30BIT_EN, (bit_depth == 10) ? 1 : 0,
+ MPCC_MCM_3DLUT_RAM_SEL, lut_bank_a ? 0 : 1);
break;
case MCM_LUT_SHAPER:
mpc32_configure_shaper_lut(mpc, lut_bank_a, mpcc_id);
@@ -578,6 +562,44 @@ void mpc401_get_gamut_remap(struct mpc *mpc,
arr_reg_val, ARRAY_SIZE(arr_reg_val));
}
+void mpc401_get_lut_mode(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const int mpcc_id,
+ bool *enable,
+ bool *lut_bank_a)
+{
+ struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
+
+ uint32_t lut_mode = 0;
+ uint32_t lut_select = 0;
+
+ *enable = false;
+ *lut_bank_a = true;
+
+ switch (id) {
+ case MCM_LUT_SHAPER:
+ REG_GET(MPCC_MCM_SHAPER_CONTROL[mpcc_id],
+ MPCC_MCM_SHAPER_MODE_CURRENT, &lut_mode);
+ *enable = lut_mode != 0;
+ *lut_bank_a = lut_mode != 2;
+ break;
+ case MCM_LUT_1DLUT:
+ REG_GET_2(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
+ MPCC_MCM_1DLUT_MODE_CURRENT, &lut_mode,
+ MPCC_MCM_1DLUT_SELECT_CURRENT, &lut_select);
+ *enable = lut_mode != 0;
+ *lut_bank_a = lut_mode == 0 || lut_select == 0;
+ break;
+ case MCM_LUT_3DLUT:
+ default:
+ REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id],
+ MPCC_MCM_3DLUT_MODE_CURRENT, &lut_mode);
+ *enable = lut_mode != 0;
+ *lut_bank_a = lut_mode != 2;
+ break;
+ }
+}
+
static const struct mpc_funcs dcn401_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -616,6 +638,7 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
+ .get_lut_mode = mpc401_get_lut_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index 6d842d7b95c7..c16560c84453 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -206,21 +206,32 @@ void dcn401_mpc_construct(struct dcn401_mpc *mpc401,
int num_rmu);
void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id);
-void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params,
- bool lut_bank_a, int mpcc_id);
+void mpc401_populate_lut(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const union mcm_lut_params *params,
+ bool lut_bank_a,
+ int mpcc_id);
void mpc401_program_lut_mode(
struct mpc *mpc,
const enum MCM_LUT_ID id,
- const enum MCM_LUT_XABLE xable,
- bool lut_bank_a,
- int mpcc_id);
+ const bool enable,
+ const bool lut_bank_a,
+ const enum dc_cm_lut_size size,
+ const int mpcc_id);
+
+void mpc401_get_lut_mode(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const int mpcc_id,
+ bool *enable,
+ bool *lut_bank_a);
void mpc401_program_lut_read_write_control(
struct mpc *mpc,
const enum MCM_LUT_ID id,
- bool lut_bank_a,
- int mpcc_id);
+ const bool lut_bank_a,
+ const unsigned int bit_depth,
+ const int mpcc_id);
void mpc401_set_gamut_remap(
struct mpc *mpc,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c
index 304b23109fb0..507dbdbea600 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c
@@ -20,7 +20,7 @@
mpc42->mpc_shift->field_name, mpc42->mpc_mask->field_name
-static void mpc42_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+void mpc42_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
{
mpcc->mpcc_id = mpcc_inst;
mpcc->dpp_id = 0xf;
@@ -63,154 +63,6 @@ void mpc42_update_blending(
mpcc->blnd_cfg = *blnd_cfg;
}
-/* Shaper functions */
-void mpc42_power_on_shaper_3dlut(
- struct mpc *mpc,
- uint32_t mpcc_id,
- bool power_on)
-{
- uint32_t power_status_shaper = 2;
- uint32_t power_status_3dlut = 2;
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
- int max_retries = 10;
-
- REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0,
- MPCC_MCM_3DLUT_MEM_PWR_DIS, power_on == true ? 1:0);
- REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0,
- MPCC_MCM_SHAPER_MEM_PWR_DIS, power_on == true ? 1:0);
- /* wait for memory to fully power up */
- if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
- REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_SHAPER_MEM_PWR_STATE, 0, 1, max_retries);
- REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_3DLUT_MEM_PWR_STATE, 0, 1, max_retries);
- }
-
- /*read status is not mandatory, it is just for debugging*/
- REG_GET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_SHAPER_MEM_PWR_STATE, &power_status_shaper);
- REG_GET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_3DLUT_MEM_PWR_STATE, &power_status_3dlut);
-
- if (power_status_shaper != 0 && power_on == true)
- BREAK_TO_DEBUGGER();
-
- if (power_status_3dlut != 0 && power_on == true)
- BREAK_TO_DEBUGGER();
-}
-
-void mpc42_configure_shaper_lut(
- struct mpc *mpc,
- bool is_ram_a,
- uint32_t mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_SHAPER_SCALE_G_B[mpcc_id],
- MPCC_MCM_SHAPER_SCALE_B, 0x7000);
- REG_UPDATE(MPCC_MCM_SHAPER_SCALE_G_B[mpcc_id],
- MPCC_MCM_SHAPER_SCALE_G, 0x7000);
- REG_UPDATE(MPCC_MCM_SHAPER_SCALE_R[mpcc_id],
- MPCC_MCM_SHAPER_SCALE_R, 0x7000);
- REG_UPDATE(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK[mpcc_id],
- MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, 7);
- REG_UPDATE(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK[mpcc_id],
- MPCC_MCM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
- REG_SET(MPCC_MCM_SHAPER_LUT_INDEX[mpcc_id], 0, MPCC_MCM_SHAPER_LUT_INDEX, 0);
-}
-
-
-void mpc42_program_3dlut_size(struct mpc *mpc, uint32_t width, int mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
- uint32_t size = 0xff;
-
- REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, &size);
-
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE,
- (width == 33) ? 2 :
- (width == 17) ? 0 : 2);
-
- REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, &size);
-}
-
-void mpc42_program_3dlut_fl_bias_scale(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-
- REG_UPDATE_2(MPCC_MCM_3DLUT_OUT_OFFSET_R[mpcc_id],
- MPCC_MCM_3DLUT_OUT_OFFSET_R, bias,
- MPCC_MCM_3DLUT_OUT_SCALE_R, scale);
-
- REG_UPDATE_2(MPCC_MCM_3DLUT_OUT_OFFSET_G[mpcc_id],
- MPCC_MCM_3DLUT_OUT_OFFSET_G, bias,
- MPCC_MCM_3DLUT_OUT_SCALE_G, scale);
-
- REG_UPDATE_2(MPCC_MCM_3DLUT_OUT_OFFSET_B[mpcc_id],
- MPCC_MCM_3DLUT_OUT_OFFSET_B, bias,
- MPCC_MCM_3DLUT_OUT_SCALE_B, scale);
-}
-
-void mpc42_program_bit_depth(struct mpc *mpc, uint16_t bit_depth, int mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id], MPCC_MCM_3DLUT_WRITE_EN_MASK, 0xF);
-
- //program bit_depth
- REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
- MPCC_MCM_3DLUT_30BIT_EN,
- (bit_depth == 10) ? 1 : 0);
-}
-
-bool mpc42_is_config_supported(uint32_t width)
-{
- if (width == 17)
- return true;
-
- return false;
-}
-
-void mpc42_populate_lut(struct mpc *mpc, const union mcm_lut_params params,
- bool lut_bank_a, int mpcc_id)
-{
- const enum dc_lut_mode next_mode = lut_bank_a ? LUT_RAM_A : LUT_RAM_B;
- const struct pwl_params *lut_shaper = params.pwl;
-
- if (lut_shaper == NULL)
- return;
- if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
- mpc42_power_on_shaper_3dlut(mpc, mpcc_id, true);
-
- mpc42_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, mpcc_id);
-
- if (next_mode == LUT_RAM_A)
- mpc32_program_shaper_luta_settings(mpc, lut_shaper, mpcc_id);
- else
- mpc32_program_shaper_lutb_settings(mpc, lut_shaper, mpcc_id);
-
- mpc32_program_shaper_lut(
- mpc, lut_shaper->rgb_resulted, lut_shaper->hw_points_num, mpcc_id);
-
- mpc42_power_on_shaper_3dlut(mpc, mpcc_id, false);
-}
-
-void mpc42_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_ID id,
- bool lut_bank_a, bool enabled, int mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-
- switch (id) {
- case MCM_LUT_3DLUT:
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_MODE,
- (!enabled) ? 0 :
- (lut_bank_a) ? 1 : 2);
- REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id], MPCC_MCM_3DLUT_RAM_SEL, lut_bank_a ? 0 : 1);
- break;
- case MCM_LUT_SHAPER:
- mpc32_configure_shaper_lut(mpc, lut_bank_a, mpcc_id);
- break;
- default:
- break;
- }
-}
-
/* RMCM Shaper functions */
void mpc42_power_on_rmcm_shaper_3dlut(
struct mpc *mpc,
@@ -674,32 +526,47 @@ void mpc42_program_rmcm_lut_read_write_control(struct mpc *mpc, const enum MCM_L
}
}
-void mpc42_program_lut_mode(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
- bool lut_bank_a, int mpcc_id)
+void mpc42_program_lut_mode(struct mpc *mpc,
+ bool enable,
+ bool lut_bank_a,
+ int mpcc_id)
{
struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
- switch (xable) {
- case MCM_LUT_DISABLE:
+ if (enable) {
+ REG_UPDATE(MPC_RMCM_SHAPER_CONTROL[mpcc_id], MPC_RMCM_SHAPER_LUT_MODE, lut_bank_a ? 1 : 2);
+ } else {
REG_UPDATE(MPC_RMCM_SHAPER_CONTROL[mpcc_id], MPC_RMCM_SHAPER_LUT_MODE, 0);
+ }
+}
+
+static uint32_t mpc42_get_rmcm_3dlut_width(
+ const enum dc_cm_lut_size size)
+{
+ uint32_t width = 0;
+
+ switch (size) {
+ case CM_LUT_SIZE_333333:
+ width = 2;
break;
- case MCM_LUT_ENABLE:
- REG_UPDATE(MPC_RMCM_SHAPER_CONTROL[mpcc_id], MPC_RMCM_SHAPER_LUT_MODE, lut_bank_a ? 1 : 2);
+ case CM_LUT_SIZE_171717:
+ default:
+ width = 0;
break;
}
+
+ return width;
}
-void mpc42_program_rmcm_3dlut_size(struct mpc *mpc, uint32_t width, int mpcc_id)
+void mpc42_program_rmcm_3dlut_size(struct mpc *mpc,
+ const enum dc_cm_lut_size size,
+ int mpcc_id)
{
struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
- uint32_t size = 0xff;
-
- REG_GET(MPC_RMCM_3DLUT_MODE[mpcc_id], MPC_RMCM_3DLUT_SIZE, &size);
+ uint32_t width = mpc42_get_rmcm_3dlut_width(size);
- REG_UPDATE(MPC_RMCM_3DLUT_MODE[mpcc_id], MPC_RMCM_3DLUT_SIZE,
- (width == 33) ? 2 : 0);
-
- REG_GET(MPC_RMCM_3DLUT_MODE[mpcc_id], MPC_RMCM_3DLUT_SIZE, &size);
+ REG_UPDATE(MPC_RMCM_3DLUT_MODE[mpcc_id],
+ MPC_RMCM_3DLUT_SIZE, width);
}
void mpc42_program_rmcm_3dlut_fast_load_bias_scale(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id)
@@ -731,14 +598,6 @@ void mpc42_program_rmcm_bit_depth(struct mpc *mpc, uint16_t bit_depth, int mpcc_
(bit_depth == 10) ? 1 : 0);
}
-bool mpc42_is_rmcm_config_supported(uint32_t width)
-{
- if (width == 17 || width == 33)
- return true;
-
- return false;
-}
-
void mpc42_set_fl_config(
struct mpc *mpc,
struct mpc_fl_3dlut_config *cfg,
@@ -746,6 +605,7 @@ void mpc42_set_fl_config(
{
struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
+ uint32_t width = mpc42_get_rmcm_3dlut_width(cfg->size);
/*
From: Jie Zhou
@@ -786,7 +646,7 @@ void mpc42_set_fl_config(
//width
REG_UPDATE_2(MPC_RMCM_3DLUT_MODE[mpcc_id],
- MPC_RMCM_3DLUT_SIZE, (cfg->width == 33) ? 2 : 0,
+ MPC_RMCM_3DLUT_SIZE, width,
MPC_RMCM_3DLUT_MODE, (!cfg->enabled) ? 0 : (cfg->select_lut_bank_a) ? 1 : 2);
//connect to hubp
@@ -799,182 +659,6 @@ void mpc42_set_fl_config(
REG_UPDATE(MPC_RMCM_CNTL[mpcc_id], MPC_RMCM_CNTL, cfg->enabled ? 0 : 0xF);
}
-//static void rmcm_program_gamut_remap(
-// struct mpc *mpc,
-// unsigned int mpcc_id,
-// const uint16_t *regval,
-// enum mpcc_gamut_remap_id gamut_remap_block_id,
-// enum mpcc_gamut_remap_mode_select mode_select)
-//{
-// struct color_matrices_reg gamut_regs;
-// struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-//
-// if (gamut_remap_block_id == MPCC_OGAM_GAMUT_REMAP ||
-// gamut_remap_block_id == MPCC_MCM_FIRST_GAMUT_REMAP ||
-// gamut_remap_block_id == MPCC_MCM_SECOND_GAMUT_REMAP) {
-// mpc_program_gamut_remap(mpc, mpcc_id, regval, gamut_remap_block_id, mode_select);
-// return;
-// }
-// if (gamut_remap_block_id == MPCC_OGAM_GAMUT_REMAP) {
-//
-// if (regval == NULL || mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
-// REG_SET(MPC_RMCM_GAMUT_REMAP_MODE[mpcc_id], 0,
-// MPC_RMCM_GAMUT_REMAP_MODE, mode_select);
-// return;
-// }
-//
-// gamut_regs.shifts.csc_c11 = mpc42->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
-// gamut_regs.masks.csc_c11 = mpc42->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
-// gamut_regs.shifts.csc_c12 = mpc42->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
-// gamut_regs.masks.csc_c12 = mpc42->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
-//
-// switch (mode_select) {
-// case MPCC_GAMUT_REMAP_MODE_SELECT_1:
-// gamut_regs.csc_c11_c12 = REG(MPC_RMCM_GAMUT_REMAP_C11_C12_A[mpcc_id]);
-// gamut_regs.csc_c33_c34 = REG(MPC_RMCM_GAMUT_REMAP_C33_C34_A[mpcc_id]);
-// break;
-// case MPCC_GAMUT_REMAP_MODE_SELECT_2:
-// gamut_regs.csc_c11_c12 = REG(MPC_RMCM_GAMUT_REMAP_C11_C12_B[mpcc_id]);
-// gamut_regs.csc_c33_c34 = REG(MPC_RMCM_GAMUT_REMAP_C33_C34_B[mpcc_id]);
-// break;
-// default:
-// break;
-// }
-//
-// cm_helper_program_color_matrices(
-// mpc->ctx,
-// regval,
-// &gamut_regs);
-//
-// //select coefficient set to use, set A (MODE_1) or set B (MODE_2)
-// REG_SET(MPC_RMCM_GAMUT_REMAP_MODE[mpcc_id], 0, MPC_RMCM_GAMUT_REMAP_MODE, mode_select);
-// }
-//}
-
-//static bool is_mpc_legacy_gamut_id(enum mpcc_gamut_remap_id gamut_remap_block_id)
-//{
-// if (gamut_remap_block_id == MPCC_OGAM_GAMUT_REMAP ||
-// gamut_remap_block_id == MPCC_MCM_FIRST_GAMUT_REMAP ||
-// gamut_remap_block_id == MPCC_MCM_SECOND_GAMUT_REMAP) {
-// return true;
-// }
-// return false;
-//}
-//static void program_gamut_remap(
-// struct mpc *mpc,
-// unsigned int mpcc_id,
-// const uint16_t *regval,
-// enum mpcc_gamut_remap_id gamut_remap_block_id,
-// enum mpcc_gamut_remap_mode_select mode_select)
-//{
-// if (is_mpc_legacy_gamut_id(gamut_remap_block_id))
-// mpc_program_gamut_remap(mpc, mpcc_id, regval, gamut_remap_block_id, mode_select);
-// else
-// rmcm_program_gamut_remap(mpc, mpcc_id, regval, gamut_remap_block_id, mode_select);
-//}
-
-//void mpc42_set_gamut_remap(
-// struct mpc *mpc,
-// int mpcc_id,
-// const struct mpc_grph_gamut_adjustment *adjust)
-//{
-// struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-// unsigned int i = 0;
-// uint32_t mode_select = 0;
-//
-// if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
-// /* Bypass / Disable if type is bypass or hw */
-// program_gamut_remap(mpc, mpcc_id, NULL,
-// adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0);
-// } else {
-// struct fixed31_32 arr_matrix[12];
-// uint16_t arr_reg_val[12];
-//
-// for (i = 0; i < 12; i++)
-// arr_matrix[i] = adjust->temperature_matrix[i];
-//
-// convert_float_matrix(arr_reg_val, arr_matrix, 12);
-//
-// if (is_mpc_legacy_gamut_id(adjust->mpcc_gamut_remap_block_id))
-// REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id],
-// MPCC_GAMUT_REMAP_MODE_CURRENT, &mode_select);
-// else
-// REG_GET(MPC_RMCM_GAMUT_REMAP_MODE[mpcc_id],
-// MPC_RMCM_GAMUT_REMAP_MODE_CURRENT, &mode_select);
-//
-// //If current set in use not set A (MODE_1), then use set A, otherwise use set B
-// if (mode_select != MPCC_GAMUT_REMAP_MODE_SELECT_1)
-// mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_1;
-// else
-// mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2;
-//
-// program_gamut_remap(mpc, mpcc_id, arr_reg_val,
-// adjust->mpcc_gamut_remap_block_id, mode_select);
-// }
-//}
-
-//static void read_gamut_remap(struct mpc *mpc,
-// int mpcc_id,
-// uint16_t *regval,
-// enum mpcc_gamut_remap_id gamut_remap_block_id,
-// uint32_t *mode_select)
-//{
-// struct color_matrices_reg gamut_regs = {0};
-// struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-//
-// if (is_mpc_legacy_gamut_id(gamut_remap_block_id)) {
-// mpc_read_gamut_remap(mpc, mpcc_id, regval, gamut_remap_block_id, mode_select);
-// }
-// if (gamut_remap_block_id == MPCC_RMCM_GAMUT_REMAP) {
-// //current coefficient set in use
-// REG_GET(MPC_RMCM_GAMUT_REMAP_MODE[mpcc_id], MPC_RMCM_GAMUT_REMAP_MODE, mode_select);
-//
-// gamut_regs.shifts.csc_c11 = mpc42->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
-// gamut_regs.masks.csc_c11 = mpc42->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
-// gamut_regs.shifts.csc_c12 = mpc42->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
-// gamut_regs.masks.csc_c12 = mpc42->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
-//
-// switch (*mode_select) {
-// case MPCC_GAMUT_REMAP_MODE_SELECT_1:
-// gamut_regs.csc_c11_c12 = REG(MPC_RMCM_GAMUT_REMAP_C11_C12_A[mpcc_id]);
-// gamut_regs.csc_c33_c34 = REG(MPC_RMCM_GAMUT_REMAP_C33_C34_A[mpcc_id]);
-// break;
-// case MPCC_GAMUT_REMAP_MODE_SELECT_2:
-// gamut_regs.csc_c11_c12 = REG(MPC_RMCM_GAMUT_REMAP_C11_C12_B[mpcc_id]);
-// gamut_regs.csc_c33_c34 = REG(MPC_RMCM_GAMUT_REMAP_C33_C34_B[mpcc_id]);
-// break;
-// default:
-// break;
-// }
-// }
-//
-// if (*mode_select != MPCC_GAMUT_REMAP_MODE_SELECT_0) {
-// cm_helper_read_color_matrices(
-// mpc42->base.ctx,
-// regval,
-// &gamut_regs);
-// }
-//}
-
-//void mpc42_get_gamut_remap(struct mpc *mpc,
-// int mpcc_id,
-// struct mpc_grph_gamut_adjustment *adjust)
-//{
-// uint16_t arr_reg_val[12] = {0};
-// uint32_t mode_select;
-//
-// read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
-//
-// if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
-// adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
-// return;
-// }
-//
-// adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
-// convert_hw_matrix(adjust->temperature_matrix,
-// arr_reg_val, ARRAY_SIZE(arr_reg_val));
-//}
-
void mpc42_read_mpcc_state(
struct mpc *mpc,
int mpcc_inst,
@@ -1071,14 +755,7 @@ static const struct mpc_funcs dcn42_mpc_funcs = {
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
- .mcm = {
- .program_lut_read_write_control = mpc42_program_lut_read_write_control,
- .program_3dlut_size = mpc42_program_3dlut_size,
- .program_bias_scale = mpc42_program_3dlut_fl_bias_scale,
- .program_bit_depth = mpc42_program_bit_depth,
- .is_config_supported = mpc42_is_config_supported,
- .populate_lut = mpc42_populate_lut,
- },
+ .get_lut_mode = mpc401_get_lut_mode,
.rmcm = {
.enable_3dlut_fl = mpc42_enable_3dlut_fl,
.update_3dlut_fast_load_select = mpc42_update_3dlut_fast_load_select,
@@ -1087,7 +764,6 @@ static const struct mpc_funcs dcn42_mpc_funcs = {
.program_3dlut_size = mpc42_program_rmcm_3dlut_size,
.program_bias_scale = mpc42_program_rmcm_3dlut_fast_load_bias_scale,
.program_bit_depth = mpc42_program_rmcm_bit_depth,
- .is_config_supported = mpc42_is_rmcm_config_supported,
.power_on_shaper_3dlut = mpc42_power_on_rmcm_shaper_3dlut,
.populate_lut = mpc42_populate_rmcm_lut,
.fl_3dlut_configure = mpc42_set_fl_config,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h
index 9b87fd2be904..a5f7f4f2bb3b 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h
@@ -882,49 +882,7 @@ void dcn42_mpc_construct(struct dcn42_mpc *mpc401,
int num_mpcc,
int num_rmu);
-
-void mpc42_program_shaper_lutb_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id);
-void mpc42_program_shaper_luta_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id);
-void mpc42_configure_shaper_lut(
- struct mpc *mpc,
- bool is_ram_a,
- uint32_t mpcc_id);
-void mpc42_power_on_shaper_3dlut(
- struct mpc *mpc,
- uint32_t mpcc_id,
- bool power_on);
-void mpc42_program_3dlut_size(
- struct mpc *mpc,
- uint32_t width,
- int mpcc_id);
-void mpc42_program_3dlut_fl_bias_scale(
- struct mpc *mpc,
- uint16_t bias,
- uint16_t scale,
- int mpcc_id);
-void mpc42_program_bit_depth(
- struct mpc *mpc,
- uint16_t bit_depth,
- int mpcc_id);
-void mpc42_populate_lut(
- struct mpc *mpc,
- const union mcm_lut_params params,
- bool lut_bank_a,
- int mpcc_id);
-void mpc42_program_lut_read_write_control(
- struct mpc *mpc,
- const enum MCM_LUT_ID id,
- bool lut_bank_a,
- bool enabled,
- int mpcc_id);
-
-bool mpc42_is_config_supported(uint32_t width);
+void mpc42_init_mpcc(struct mpcc *mpcc, int mpcc_inst);
/* RMCM */
void mpc42_program_rmcm_shaper_lut(
@@ -969,12 +927,12 @@ void mpc42_program_rmcm_lut_read_write_control(
int mpcc_id);
void mpc42_program_lut_mode(
struct mpc *mpc,
- const enum MCM_LUT_XABLE xable,
+ bool enable,
bool lut_bank_a,
int mpcc_id);
void mpc42_program_rmcm_3dlut_size(
struct mpc *mpc,
- uint32_t width,
+ const enum dc_cm_lut_size size,
int mpcc_id);
void mpc42_program_rmcm_3dlut_fast_load_bias_scale(
struct mpc *mpc,
@@ -986,8 +944,6 @@ void mpc42_program_rmcm_bit_depth(
uint16_t bit_depth,
int mpcc_id);
-bool mpc42_is_rmcm_config_supported(uint32_t width);
-
void mpc42_set_fl_config(
struct mpc *mpc,
struct mpc_fl_3dlut_config *cfg,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index cf05620fd8f5..138081e6cc97 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -218,7 +218,7 @@
uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \
uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \
uint32_t OTG_DLPC_CONTROL; \
- uint32_t OTG_DRR_CONTROL2; \
+ uint32_t OTG_DRR_CONTOL2; \
uint32_t OTG_DRR_TIMING_INT_STATUS; \
uint32_t OTG_GLOBAL_CONTROL3; \
uint32_t OTG_GLOBAL_SYNC_STATUS; \
@@ -676,6 +676,10 @@ struct dcn_optc_registers {
type OTG_V_COUNT_STOP_TIMER;
#define TG_REG_FIELD_LIST_DCN3_6(type) \
+ type OPTC_RSMU_UNDERFLOW_CLEAR;\
+ type OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS;\
+ type OPTC_RSMU_UNDERFLOW_INT_EN;\
+ type OPTC_RSMU_UNDERFLOW_INT_STATUS;\
type OTG_CRC_POLY_SEL; \
type CRC0_R_CR32; \
type CRC0_G_Y32; \
@@ -703,7 +707,7 @@ struct dcn_optc_shift {
TG_REG_FIELD_LIST_DCN3_5(uint8_t)
TG_REG_FIELD_LIST_DCN3_6(uint8_t)
TG_REG_FIELD_LIST_DCN401(uint8_t)
- TG_REG_FIELD_LIST_DCN42(uint8_t)
+ TG_REG_FIELD_LIST_DCN42(uint8_t)
};
struct dcn_optc_mask {
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index c6417538090f..893d2aff1f82 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -363,7 +363,7 @@ void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_st
optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG);
optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL);
optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
- optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2);
+ optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTOL2);
optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL);
optc_reg_state->otg_drr_timing_int_status = REG_READ(OTG_DRR_TIMING_INT_STATUS);
optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c
index effd05b3685f..ed66a2bbb8ae 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c
@@ -6,11 +6,13 @@
#include "dcn30/dcn30_optc.h"
#include "dcn31/dcn31_optc.h"
#include "dcn32/dcn32_optc.h"
+#include "dcn35/dcn35_optc.h"
#include "dcn401/dcn401_optc.h"
#include "reg_helper.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"
+#include "dc_trace.h"
#define REG(reg)\
optc1->tg_regs->reg
@@ -108,6 +110,89 @@ void optc42_disable_pwa(struct timing_generator *optc)
REG_UPDATE(OTG_PWA_FRAME_SYNC_CONTROL,
OTG_PWA_FRAME_SYNC_EN, 0);
}
+void optc42_clear_optc_underflow(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
+ REG_UPDATE(OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_CLEAR, 1);
+}
+bool optc42_is_optc_underflow_occurred(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t underflow_occurred = 0, rsmu_underflow_occurred = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS,
+ &underflow_occurred);
+
+ REG_GET(OPTC_RSMU_UNDERFLOW,
+ OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS,
+ &rsmu_underflow_occurred);
+ return (underflow_occurred == 1 || rsmu_underflow_occurred);
+}
+/* disable_crtc */
+bool optc42_disable_crtc(struct timing_generator *optc)
+{
+ optc401_disable_crtc(optc);
+ optc42_clear_optc_underflow(optc);
+
+ return true;
+}
+static void optc42_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t mode = enable ? 2 : 0;
+ /* actually we have 4 modes now, use as the same as previous dcn3x
+ * 00 OTG_DOUBLE_BUFFER_CONTROL_OTG_DRR_TIMING_DBUF_UPDATE_MODE_0 Double buffer update occurs at any time in a frame.
+ * 01 OTG_DOUBLE_BUFFER_CONTROL_OTG_DRR_TIMING_DBUF_UPDATE_MODE_1 Double buffer update occurs at OTG start of frame.
+ * 02 OTG_DOUBLE_BUFFER_CONTROL_OTG_DRR_TIMING_DBUF_UPDATE_MODE_2 Double buffer occurs DP start of frame.
+ * 03 OTG_DOUBLE_BUFFER_CONTROL_OTG_DRR_TIMING_DBUF_UPDATE_MODE_3 Reserved.
+ */
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
+}
+void optc42_tg_init(struct timing_generator *optc)
+{
+ optc42_set_timing_double_buffer(optc, true);
+ optc42_clear_optc_underflow(optc);
+}
+
+void optc42_lock_doublebuffer_enable(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t v_blank_start = 0;
+ uint32_t v_blank_end = 0;
+ uint32_t h_blank_start = 0;
+ uint32_t h_blank_end = 0;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &v_blank_start,
+ OTG_V_BLANK_END, &v_blank_end);
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &h_blank_start,
+ OTG_H_BLANK_END, &h_blank_end);
+
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start,
+ MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start);
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL4,
+ DIG_UPDATE_POSITION_X, 20,
+ DIG_UPDATE_POSITION_Y, v_blank_start);
+ REG_UPDATE_3(OTG_GLOBAL_CONTROL0,
+ MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1,
+ MASTER_UPDATE_LOCK_DB_END_X, h_blank_end,
+ MASTER_UPDATE_LOCK_DB_EN, 1);
+ REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+
+ TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);
+}
static struct timing_generator_funcs dcn42_tg_funcs = {
.validate_timing = optc1_validate_timing,
@@ -117,7 +202,7 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc401_program_global_sync,
.enable_crtc = optc401_enable_crtc,
- .disable_crtc = optc401_disable_crtc,
+ .disable_crtc = optc42_disable_crtc,
.phantom_crtc_post_enable = optc401_phantom_crtc_post_enable,
.disable_phantom_crtc = optc401_disable_phantom_otg,
/* used by enable_timing_synchronization. Not need for FPGA */
@@ -138,7 +223,7 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
.unlock = optc1_unlock,
- .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+ .lock_doublebuffer_enable = optc42_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc401_set_drr,
@@ -147,13 +232,13 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
- .tg_init = optc3_tg_init,
+ .tg_init = optc42_tg_init,
.is_tg_enabled = optc1_is_tg_enabled,
- .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
- .clear_optc_underflow = optc1_clear_optc_underflow,
+ .is_optc_underflow_occurred = optc42_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc42_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc42_get_crc,
- .configure_crc = optc1_configure_crc,
+ .configure_crc = optc35_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
@@ -162,6 +247,7 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc401_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
+ .wait_otg_disable = optc35_wait_otg_disable,
.set_out_mux = optc401_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
@@ -171,6 +257,8 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
+ .init_odm = optc3_init_odm,
+ .set_long_vtotal = optc35_set_long_vtotal,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
@@ -178,6 +266,7 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.set_vupdate_keepout = optc401_set_vupdate_keepout,
.wait_update_lock_status = optc401_wait_update_lock_status,
.optc_read_reg_state = optc31_read_reg_state,
+ .read_otg_state = optc31_read_otg_state,
.enable_otg_pwa = optc42_enable_pwa,
.disable_otg_pwa = optc42_disable_pwa,
};
@@ -194,5 +283,9 @@ void dcn42_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
+ optc1->max_frame_count = 0xFFFFFF;
+
+ dcn35_timing_generator_set_fgcg(
+ optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h
index 45d2187efaca..fc7192f01b33 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h
@@ -119,6 +119,10 @@
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
+ SF(ODM0_OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
+ SF(ODM0_OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_CLEAR, mask_sh),\
+ SF(ODM0_OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_INT_EN, mask_sh),\
+ SF(ODM0_OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_INT_STATUS, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
@@ -202,10 +206,15 @@
SF(OTG0_OTG_PWA_FRAME_SYNC_CONTROL, OTG_PWA_FRAME_SYNC_EN, mask_sh),\
SF(OTG0_OTG_PWA_FRAME_SYNC_CONTROL, OTG_PWA_FRAME_SYNC_VCOUNT_MODE, mask_sh),\
SF(OTG0_OTG_PWA_FRAME_SYNC_CONTROL, OTG_PWA_FRAME_SYNC_LINE, mask_sh),\
- SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh),\
+ SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh)
void dcn42_timing_generator_init(struct optc *optc1);
void optc42_enable_pwa(struct timing_generator *optc, struct otc_pwa_frame_sync *pwa_sync_param);
void optc42_disable_pwa(struct timing_generator *optc);
-
+void optc42_tg_init(struct timing_generator *optc);
+void optc42_clear_optc_underflow(struct timing_generator *optc);
+bool optc42_is_optc_underflow_occurred(struct timing_generator *optc);
+bool optc42_disable_crtc(struct timing_generator *optc);
+void optc42_lock_doublebuffer_enable(struct timing_generator *optc);
#endif /* __DC_OPTC_DCN42_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 8b555187ac75..366576b1c617 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2341,8 +2341,6 @@ static bool init_soc_bounding_box(struct dc *dc,
struct _vcs_dpi_ip_params_st *loaded_ip =
get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
- DC_LOGGER_INIT(dc->ctx->logger);
-
if (pool->base.pp_smu) {
struct pp_smu_nv_clock_table max_clocks = {0};
unsigned int uclk_states[8] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
index 8e41367cf238..aef187bcf5c3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
@@ -53,7 +53,7 @@
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
-#include "virtual/virtual_stream_encoder.h"
+#include "dio/virtual/virtual_stream_encoder.h"
#include "dml/display_mode_vba.h"
#include "dcn42/dcn42_dccg.h"
#include "dcn10/dcn10_resource.h"
@@ -666,6 +666,7 @@ static const struct resource_caps res_cap_dcn42 = {
.num_vmid = 16,
.num_mpc_3dlut = 2,
.num_dsc = 4,
+ .num_rmcm = 2,
};
static const struct dc_plane_cap plane_cap = {
@@ -755,6 +756,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.dcc_meta_propagation_delay_us = 10,
.disable_timeout = true,
.min_disp_clk_khz = 50000,
+ .static_screen_wait_frames = 2,
.disable_z10 = false,
.ignore_pg = true,
.disable_stutter_for_wm_program = true,
@@ -2302,14 +2304,12 @@ static bool dcn42_resource_construct(
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN42_CRB_SEGMENT_SIZE_KB;
+ dc->dml2_options.gpuvm_enable = true;
+ dc->dml2_options.hostvm_enable = true;
/* SPL */
dc->caps.scl_caps.sharpener_support = true;
- /* init DC limited DML2 options */
- memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
- dc->dml2_dc_power_options.use_clock_dc_limits = true;
-
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
index bc93356a0b5b..d168fb1eacf7 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
@@ -8,11 +8,14 @@ soc_and_ip_translator_ccflags := $(CC_FLAGS_FPU)
soc_and_ip_translator_rcflags := $(CC_FLAGS_NO_FPU)
CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
soc_and_ip_translator := soc_and_ip_translator.o
soc_and_ip_translator += dcn401/dcn401_soc_and_ip_translator.o
+soc_and_ip_translator += dcn42/dcn42_soc_and_ip_translator.o
AMD_DAL_soc_and_ip_translator := $(addprefix $(AMDDALPATH)/dc/soc_and_ip_translator/, $(soc_and_ip_translator))
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
index 3190c76eb482..1b397fa7e05c 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
@@ -102,6 +102,9 @@ static void dcn401_convert_dc_clock_table_to_soc_bb_clock_table(
}
} else {
dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ #ifdef ENABLE_WCK
+ dml_clk_table->wck_ratio.clk_values_khz[i] = dc_clk_table->entries[i].wck_ratio;
+ #endif
}
} else {
dml_clk_table->uclk.clk_values_khz[i] = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
index c9e224d262c9..fd9c24b5df53 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -3,7 +3,7 @@
// Copyright 2025 Advanced Micro Devices, Inc.
#include "dcn42_soc_and_ip_translator.h"
-#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "../dcn401/dcn401_soc_and_ip_translator.h"
#include "bounding_boxes/dcn42_soc_bb.h"
/* soc_and_ip_translator component used to get up-to-date values for bounding box.
@@ -11,13 +11,171 @@
* This component provides an interface to get DCN-specific bounding box values.
*/
+static void get_default_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ {
+ memcpy(soc_bb, &dml2_socbb_dcn42, sizeof(struct dml2_soc_bb));
+ memcpy(&soc_bb->qos_parameters, &dml_dcn42_variant_a_soc_qos_params, sizeof(struct dml2_soc_qos_parameters));
+ }
+}
+
+/*
+ * DC clock table is obtained from SMU during runtime.
+ * SMU stands for System Management Unit. It is a power management processor.
+ * It owns the initialization of dc's clock table and programming of clock values
+ * based on dc's requests.
+ * Our clock values in base soc bb is a dummy placeholder. The real clock values
+ * are retrieved from SMU firmware to dc clock table at runtime.
+ * This function overrides our dummy placeholder values with real values in dc
+ * clock table.
+ */
+static void dcn42_convert_dc_clock_table_to_soc_bb_clock_table(
+ struct dml2_soc_state_table *dml_clk_table,
+ struct dml2_soc_vmin_clock_limits *vmin_limit,
+ const struct clk_bw_params *dc_bw_params)
+{
+ int i;
+ const struct clk_limit_table *dc_clk_table;
+
+ if (dc_bw_params == NULL)
+ /* skip if bw params could not be obtained from smu */
+ return;
+
+ dc_clk_table = &dc_bw_params->clk_table;
+
+ /* fclk/dcfclk - dcn42 pmfw table can have 0 entries for inactive dpm levels
+ * for use with dml we need to fill in using an active value aiming for >= 2x DCFCLK
+ */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels && dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ int j, max_fclk = 0;
+
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ for (j = 0; j < MAX_NUM_DPM_LVL; j++) {
+ if (dc_clk_table->entries[j].fclk_mhz * 1000 > max_fclk)
+ max_fclk = dc_clk_table->entries[j].fclk_mhz * 1000;
+ dml_clk_table->fclk.clk_values_khz[i] = max_fclk;
+ if (max_fclk >= 2 * dml_clk_table->dcfclk.clk_values_khz[i])
+ break;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ dml_clk_table->wck_ratio.clk_values_khz[i] = dc_clk_table->entries[i].wck_ratio;
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->wck_ratio.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ }
+ }
+ vmin_limit->dispclk_khz = min(dc_clk_table->entries[0].dispclk_mhz * 1000, vmin_limit->dispclk_khz);
+ }
+
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dram config */
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+}
+
+static void dcn42_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ soc_bb->dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000;
+ soc_bb->dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ soc_bb->mall_allocated_for_dcn_mbytes = dc->caps.mall_size_total / (1024 * 1024);
+
+ if (dc->clk_mgr->funcs->is_smu_present &&
+ dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) {
+ dcn42_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table, &soc_bb->vmin_limit,
+ dc->clk_mgr->bw_params);
+ }
+}
+
+static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ /* Individual modification can be overwritten even if it was obtained by a previous function.
+ * Modifications are acquired in order of priority (lowest to highest).
+ */
+ dc_assert_fp_enabled();
+
+ dcn42_update_soc_bb_with_values_from_clk_mgr(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_vbios(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_software_policy(soc_bb, dc);
+}
+
+void dcn42_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ //get default soc_bb with static values
+ get_default_soc_bb(soc_bb, dc);
+ //update soc_bb values with more accurate values
+ apply_soc_bb_updates(soc_bb, dc, config);
+}
+
static void dcn42_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
{
*ip_caps = dml2_dcn42_max_ip_caps;
}
static struct soc_and_ip_translator_funcs dcn42_translator_funcs = {
- .get_soc_bb = dcn401_get_soc_bb,
+ .get_soc_bb = dcn42_get_soc_bb,
.get_ip_caps = dcn42_get_ip_caps,
};
@@ -25,3 +183,4 @@ void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and
{
soc_and_ip_translator->translator_funcs = &dcn42_translator_funcs;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
index 914dcbb369a7..1dded5426152 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
@@ -12,5 +12,6 @@
#include "soc_and_ip_translator.h"
void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+void dcn42_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
#endif /* _DCN42_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
index 6617c9d2d5f8..bad0bd79fa94 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
@@ -4,6 +4,7 @@
#include "soc_and_ip_translator.h"
#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h"
static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator,
enum dce_version dc_version)
@@ -12,6 +13,9 @@ static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc
case DCN_VERSION_4_01:
dcn401_construct_soc_and_ip_translator(soc_and_ip_translator);
break;
+ case DCN_VERSION_4_2:
+ dcn42_construct_soc_and_ip_translator(soc_and_ip_translator);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 2abbc6c97850..e11e32afac6b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -909,7 +909,8 @@ union dmub_fw_meta_feature_bits {
struct {
uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */
uint32_t cursor_offload_v1_support: 1; /**< 1 supports cursor offload */
- uint32_t reserved : 30;
+ uint32_t inbox0_lock_support: 1; /**< 1 supports inbox0 lock mechanism */
+ uint32_t reserved : 29;
} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@@ -1535,14 +1536,12 @@ enum dmub_gpint_command {
* 1 - Enable ips measurement
*/
DMUB_GPINT__IPS_RESIDENCY = 121,
-
/**
* DESC: Enable measurements for various task duration
* ARGS: 0 - Disable measurement
* 1 - Enable measurement
*/
DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123,
-
/**
* DESC: Gets IPS residency in microseconds
* ARGS: 0 - Return IPS1 residency
@@ -1552,21 +1551,18 @@ enum dmub_gpint_command {
* RETURN: Total residency in microseconds - lower 32 bits
*/
DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO = 124,
-
/**
* DESC: Gets IPS1 histogram counts
* ARGS: Bucket index
* RETURN: Total count for the bucket
*/
DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER = 125,
-
/**
* DESC: Gets IPS2 histogram counts
* ARGS: Bucket index
* RETURN: Total count for the bucket
*/
DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER = 126,
-
/**
* DESC: Gets IPS residency
* ARGS: 0 - Return IPS1 residency
@@ -1576,21 +1572,18 @@ enum dmub_gpint_command {
* RETURN: Total residency in milli-percent.
*/
DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT = 127,
-
/**
* DESC: Gets IPS1_RCG histogram counts
* ARGS: Bucket index
* RETURN: Total count for the bucket
*/
DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER = 128,
-
/**
* DESC: Gets IPS1_ONO2_ON histogram counts
* ARGS: Bucket index
* RETURN: Total count for the bucket
*/
DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER = 129,
-
/**
* DESC: Gets IPS entry counter during residency measurement
* ARGS: 0 - Return IPS1 entry counts
@@ -1600,7 +1593,6 @@ enum dmub_gpint_command {
* RETURN: Entry counter for selected IPS mode
*/
DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER = 130,
-
/**
* DESC: Gets IPS inactive residency in microseconds
* ARGS: 0 - Return IPS1_MAX residency
@@ -1610,7 +1602,6 @@ enum dmub_gpint_command {
* RETURN: Total inactive residency in microseconds - lower 32 bits
*/
DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO = 131,
-
/**
* DESC: Gets IPS inactive residency in microseconds
* ARGS: 0 - Return IPS1_MAX residency
@@ -1620,7 +1611,6 @@ enum dmub_gpint_command {
* RETURN: Total inactive residency in microseconds - upper 32 bits
*/
DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI = 132,
-
/**
* DESC: Gets IPS residency in microseconds
* ARGS: 0 - Return IPS1 residency
@@ -1679,7 +1669,7 @@ union dmub_inbox0_cmd_lock_hw {
uint32_t lock: 1; /**< Lock */
uint32_t should_release: 1; /**< Release */
- uint32_t reserved: 7; /**< Reserved for extending more clients, HW, etc. */
+ uint32_t reserved: 7; /**< Reserved for extending more clients, HW, etc. */
} bits;
uint32_t all;
};
@@ -1903,6 +1893,11 @@ enum dmub_cmd_type {
DMUB_CMD__IHC = 95,
/**
+ * Command type use for boot time crc commands.
+ */
+ DMUB_CMD__BOOT_TIME_CRC = 96,
+
+ /**
* Command type use for VBIOS shared commands.
*/
DMUB_CMD__VBIOS = 128,
@@ -2614,9 +2609,9 @@ struct dmub_fams2_stream_static_state {
uint8_t allow_to_target_delay_otg_vlines; // time from allow vline to target vline
union {
struct {
- uint8_t is_drr: 1; // stream is DRR enabled
- uint8_t clamp_vtotal_min: 1; // clamp vtotal to min instead of nominal
- uint8_t min_ttu_vblank_usable: 1; // if min ttu vblank is above wm, no force pstate is needed in blank
+ uint8_t is_drr : 1; // stream is DRR enabled
+ uint8_t clamp_vtotal_min : 1; // clamp vtotal to min instead of nominal
+ uint8_t min_ttu_vblank_usable : 1; // if min ttu vblank is above wm, no force pstate is needed in blank
} bits;
uint8_t all;
} config;
@@ -4441,6 +4436,7 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
REPLAY_GENERAL_CMD_VIDEO_CONFERENCING,
REPLAY_GENERAL_CMD_SET_CONTINUOUSLY_RESYNC,
+ REPLAY_GENERAL_CMD_SET_COASTING_VTOTAL_WITHOUT_FRAME_UPDATE,
};
struct dmub_alpm_auxless_data {
@@ -4659,6 +4655,18 @@ struct dmub_rb_cmd_replay_enable_data {
* This does not support HDMI/DP2 for now.
*/
uint8_t phy_rate;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -5272,8 +5280,8 @@ enum dmub_cmd_lsdma_type {
*/
DMUB_CMD__LSDMA_LINEAR_COPY = 1,
/**
- * LSDMA copies data from source to destination linearly in sub window
- */
+ * LSDMA copies data from source to destination linearly in sub window
+ */
DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY = 2,
/**
* Send the tiled-to-tiled copy command
@@ -6836,6 +6844,29 @@ struct dmub_rb_cmd_pr_general_cmd {
};
/**
+ * Command type of a DMUB_CMD__BOOT_TIME_CRC command
+ */
+enum dmub_cmd_boot_time_crc_type {
+ DMUB_CMD__BOOT_TIME_CRC_INIT_MEM = 0
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__BOOT_TIME_CRC_INIT command.
+ */
+struct dmub_cmd_boot_time_crc_init_data {
+ union dmub_addr buffer_addr;
+ uint32_t buffer_size;
+};
+
+/**
+ * Definition of a DMUB_CMD__BOOT_TIME_CRC_INIT command.
+ */
+struct dmub_rb_cmd_boot_time_crc_init {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_boot_time_crc_init_data data;
+};
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -7196,6 +7227,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IHC command.
*/
struct dmub_rb_cmd_ihc ihc;
+ /**
+ * Definition of a DMUB_CMD__BOOT_TIME_CRC_INIT command.
+ */
+ struct dmub_rb_cmd_boot_time_crc_init boot_time_crc_init;
};
/**
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 1afa10e85eb5..4a8ca0ac1266 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -64,6 +64,9 @@
#ifndef DP_PR_ERROR_STATUS // can remove this once the define gets into linux drm_dp_helper.h
#define DP_PR_ERROR_STATUS 0x2020 /* DP 2.0 */
#endif /* DP_PR_ERROR_STATUS */
+#ifndef DP_PR_REPLAY_SINK_STATUS // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PR_REPLAY_SINK_STATUS 0x2022
+#endif /* DP_PR_REPLAY_SINK_STATUS */
#ifndef DP_PR_LINK_CRC_ERROR // can remove this once the define gets into linux drm_dp_helper.h
#define DP_PR_LINK_CRC_ERROR (1 << 0)
#endif /* DP_PR_LINK_CRC_ERROR */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_offset.h
new file mode 100644
index 000000000000..aa3ef3a34013
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_offset.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2026 Advanced Micro Devices, Inc. */
+
+#ifndef _clk_15_0_0_OFFSET_HEADER
+#define _clk_15_0_0_OFFSET_HEADER
+
+// addressBlock: clk_clk8_0_SmuClkDec
+// base address: 0x6e000
+#define regCLK8_CLK0_DS_CNTL 0x4c14
+#define regCLK8_CLK0_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK1_DS_CNTL 0x4c1c
+#define regCLK8_CLK1_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK2_DS_CNTL 0x4c24
+#define regCLK8_CLK2_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK3_DS_CNTL 0x4c2c
+#define regCLK8_CLK3_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK4_DS_CNTL 0x4c34
+#define regCLK8_CLK4_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK0_BYPASS_CNTL 0x4c1a
+#define regCLK8_CLK0_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK1_BYPASS_CNTL 0x4c22
+#define regCLK8_CLK1_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK2_BYPASS_CNTL 0x4c2a
+#define regCLK8_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK3_BYPASS_CNTL 0x4c32
+#define regCLK8_CLK3_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK4_BYPASS_CNTL 0x4c3a
+#define regCLK8_CLK4_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK_TICK_CNT_CONFIG_REG 0x4c51
+#define regCLK8_CLK_TICK_CNT_CONFIG_REG_BASE_IDX 0
+#define regCLK8_CLK_TICK_CNT_STATUS 0x4c52
+#define regCLK8_CLK_TICK_CNT_STATUS_BASE_IDX 0
+#define regCLK8_CLK0_CURRENT_CNT 0x4c53
+#define regCLK8_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK8_CLK1_CURRENT_CNT 0x4c54
+#define regCLK8_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK8_CLK2_CURRENT_CNT 0x4c55
+#define regCLK8_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK8_CLK3_CURRENT_CNT 0x4c56
+#define regCLK8_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK8_CLK4_CURRENT_CNT 0x4c57
+#define regCLK8_CLK4_CURRENT_CNT_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_sh_mask.h
new file mode 100644
index 000000000000..c78622d06e1b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_sh_mask.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2026 Advanced Micro Devices, Inc. */
+
+#ifndef _clk_15_0_0_SH_MASK_HEADER
+#define _clk_15_0_0_SH_MASK_HEADER
+
+// addressBlock: clk_clk8_0_SmuClkDec
+//CLK8_CLK_TICK_CNT_CONFIG_REG
+#define CLK8_CLK_TICK_CNT_CONFIG_REG__TIMER_THRESHOLD__SHIFT 0x0
+#define CLK8_CLK_TICK_CNT_CONFIG_REG__TIMER_THRESHOLD_MASK 0xFFFFL
+//CLK8_CLK0_BYPASS_CNTL
+#define CLK8_CLK0_BYPASS_CNTL__CLK0_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK0_BYPASS_CNTL__CLK0_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK1_BYPASS_CNTL
+#define CLK8_CLK1_BYPASS_CNTL__CLK1_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK1_BYPASS_CNTL__CLK1_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK2_BYPASS_CNTL
+#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK3_BYPASS_CNTL
+#define CLK8_CLK3_BYPASS_CNTL__CLK3_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK3_BYPASS_CNTL__CLK3_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK4_BYPASS_CNTL
+#define CLK8_CLK4_BYPASS_CNTL__CLK4_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK4_BYPASS_CNTL__CLK4_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK0_DS_CNTL
+#define CLK8_CLK0_DS_CNTL__CLK0_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK0_DS_CNTL__CLK0_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK0_DS_CNTL__CLK0_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK0_DS_CNTL__CLK0_ALLOW_DS_MASK 0x00000010L
+//CLK8_CLK1_DS_CNTL
+#define CLK8_CLK1_DS_CNTL__CLK1_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK1_DS_CNTL__CLK1_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK1_DS_CNTL__CLK1_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK1_DS_CNTL__CLK1_ALLOW_DS_MASK 0x00000010L
+//CLK8_CLK2_DS_CNTL
+#define CLK8_CLK2_DS_CNTL__CLK2_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK2_DS_CNTL__CLK2_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK2_DS_CNTL__CLK2_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK2_DS_CNTL__CLK2_ALLOW_DS_MASK 0x00000010L
+//CLK8_CLK3_DS_CNTL
+#define CLK8_CLK3_DS_CNTL__CLK3_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK3_DS_CNTL__CLK3_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK3_DS_CNTL__CLK3_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK3_DS_CNTL__CLK3_ALLOW_DS_MASK 0x00000010L
+//CLK8_CLK4_DS_CNTL
+#define CLK8_CLK4_DS_CNTL__CLK4_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK4_DS_CNTL__CLK4_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK4_DS_CNTL__CLK4_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK4_DS_CNTL__CLK4_ALLOW_DS_MASK 0x00000010L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
index 825201f4e113..52fbf2dc1899 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
@@ -9010,6 +9010,8 @@
// base address: 0x0
#define regODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca
#define regODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_RSMU_UNDERFLOW 0x1acb
+#define regODM0_OPTC_RSMU_UNDERFLOW_BASE_IDX 2
#define regODM0_OPTC_UNDERFLOW_THRESHOLD 0x1acc
#define regODM0_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
#define regODM0_OPTC_DATA_SOURCE_SELECT 0x1acd
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h
index 4ed96244f61b..01fb53093369 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h
@@ -33583,6 +33583,15 @@
#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM0_OPTC_RSMU_UNDERFLOW
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_INT_EN__SHIFT 0x0
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x1
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_INT_STATUS__SHIFT 0x2
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_CLEAR__SHIFT 0x3
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_INT_EN_MASK 0x00000001L
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000002L
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_INT_STATUS_MASK 0x00000004L
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_CLEAR_MASK 0x00000008L
//ODM0_OPTC_UNDERFLOW_THRESHOLD
#define ODM0_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
#define ODM0_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_offset.h
new file mode 100644
index 000000000000..fa4e42a3ae9f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_offset.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _lsdma_7_1_0_OFFSET_HEADER
+#define _lsdma_7_1_0_OFFSET_HEADER
+
+#define regLSDMA_PIO_SRC_ADDR_LO 0x0080
+#define regLSDMA_PIO_SRC_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PIO_SRC_ADDR_HI 0x0081
+#define regLSDMA_PIO_SRC_ADDR_HI_BASE_IDX 0
+#define regLSDMA_PIO_DST_ADDR_LO 0x0082
+#define regLSDMA_PIO_DST_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PIO_DST_ADDR_HI 0x0083
+#define regLSDMA_PIO_DST_ADDR_HI_BASE_IDX 0
+#define regLSDMA_PIO_COMMAND 0x0084
+#define regLSDMA_PIO_COMMAND_BASE_IDX 0
+#define regLSDMA_PIO_CONSTFILL_DATA 0x0085
+#define regLSDMA_PIO_CONSTFILL_DATA_BASE_IDX 0
+#define regLSDMA_PIO_CONTROL 0x0086
+#define regLSDMA_PIO_CONTROL_BASE_IDX 0
+
+#define regLSDMA_PIO_STATUS 0x008a
+#define regLSDMA_PIO_STATUS_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_sh_mask.h
new file mode 100644
index 000000000000..cf83dacf4acf
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_sh_mask.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _lsdma_7_1_0_SH_MASK_HEADER
+#define _lsdma_7_1_0_SH_MASK_HEADER
+
+
+// addressBlock: lsdma0_lsdma0dec
+//LSDMA_PIO_STATUS
+#define LSDMA_PIO_STATUS__CMD_IN_FIFO__SHIFT 0x0
+#define LSDMA_PIO_STATUS__CMD_PROCESSING__SHIFT 0x3
+#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR__SHIFT 0xb
+#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT__SHIFT 0xc
+#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC__SHIFT 0xd
+#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC__SHIFT 0xe
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR__SHIFT 0xf
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR__SHIFT 0x10
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT__SHIFT 0x11
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT__SHIFT 0x12
+#define LSDMA_PIO_STATUS__ERROR_REQ_DROP__SHIFT 0x13
+#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define LSDMA_PIO_STATUS__PIO_FIFO_FULL__SHIFT 0x1d
+#define LSDMA_PIO_STATUS__PIO_IDLE__SHIFT 0x1f
+#define LSDMA_PIO_STATUS__CMD_IN_FIFO_MASK 0x00000007L
+#define LSDMA_PIO_STATUS__CMD_PROCESSING_MASK 0x000003F8L
+#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR_MASK 0x00000800L
+#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT_MASK 0x00001000L
+#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC_MASK 0x00002000L
+#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC_MASK 0x00004000L
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR_MASK 0x00008000L
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR_MASK 0x00010000L
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT_MASK 0x00020000L
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT_MASK 0x00040000L
+#define LSDMA_PIO_STATUS__ERROR_REQ_DROP_MASK 0x00080000L
+#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define LSDMA_PIO_STATUS__PIO_FIFO_FULL_MASK 0x20000000L
+#define LSDMA_PIO_STATUS__PIO_IDLE_MASK 0x80000000L
+//LSDMA_PIO_SRC_ADDR_LO
+#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO__SHIFT 0x0
+#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_PIO_SRC_ADDR_HI
+#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0xFFFFFFFFL
+//LSDMA_PIO_DST_ADDR_LO
+#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO__SHIFT 0x0
+#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_PIO_DST_ADDR_HI
+#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI_MASK 0xFFFFFFFFL
+//LSDMA_PIO_CONTROL
+#define LSDMA_PIO_CONTROL__VMID__SHIFT 0x0
+#define LSDMA_PIO_CONTROL__DST_GPA__SHIFT 0x4
+#define LSDMA_PIO_CONTROL__DST_SYS__SHIFT 0x5
+#define LSDMA_PIO_CONTROL__DST_GCC__SHIFT 0x6
+#define LSDMA_PIO_CONTROL__DST_SNOOP__SHIFT 0x7
+#define LSDMA_PIO_CONTROL__DST_REUSE_HINT__SHIFT 0x8
+#define LSDMA_PIO_CONTROL__DST_COMP_EN__SHIFT 0xa
+#define LSDMA_PIO_CONTROL__SRC_GPA__SHIFT 0x14
+#define LSDMA_PIO_CONTROL__SRC_SYS__SHIFT 0x15
+#define LSDMA_PIO_CONTROL__SRC_SNOOP__SHIFT 0x17
+#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT__SHIFT 0x18
+#define LSDMA_PIO_CONTROL__SRC_COMP_EN__SHIFT 0x1a
+#define LSDMA_PIO_CONTROL__VMID_MASK 0x0000000FL
+#define LSDMA_PIO_CONTROL__DST_GPA_MASK 0x00000010L
+#define LSDMA_PIO_CONTROL__DST_SYS_MASK 0x00000020L
+#define LSDMA_PIO_CONTROL__DST_GCC_MASK 0x00000040L
+#define LSDMA_PIO_CONTROL__DST_SNOOP_MASK 0x00000080L
+#define LSDMA_PIO_CONTROL__DST_REUSE_HINT_MASK 0x00000300L
+#define LSDMA_PIO_CONTROL__DST_COMP_EN_MASK 0x00000400L
+#define LSDMA_PIO_CONTROL__SRC_GPA_MASK 0x00100000L
+#define LSDMA_PIO_CONTROL__SRC_SYS_MASK 0x00200000L
+#define LSDMA_PIO_CONTROL__SRC_SNOOP_MASK 0x00800000L
+#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT_MASK 0x03000000L
+#define LSDMA_PIO_CONTROL__SRC_COMP_EN_MASK 0x04000000L
+//LSDMA_PIO_COMMAND
+#define LSDMA_PIO_COMMAND__COUNT__SHIFT 0x0
+#define LSDMA_PIO_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define LSDMA_PIO_COMMAND__CONSTANT_FILL__SHIFT 0x1f
+#define LSDMA_PIO_COMMAND__COUNT_MASK 0x03FFFFFFL
+#define LSDMA_PIO_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define LSDMA_PIO_COMMAND__CONSTANT_FILL_MASK 0x80000000L
+//LSDMA_PIO_CONSTFILL_DATA
+#define LSDMA_PIO_CONSTFILL_DATA__DATA__SHIFT 0x0
+#define LSDMA_PIO_CONSTFILL_DATA__DATA_MASK 0xFFFFFFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 710e328fad48..76c9f951bc1c 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -64,6 +64,19 @@ typedef struct binary_header
table_info table_list[TOTAL_TABLES];
} binary_header;
+typedef struct binary_header_v2
+{
+ /* psp structure should go at the top of this structure */
+ uint32_t binary_signature; /* 0x7, 0x14, 0x21, 0x28 */
+ uint16_t version_major; /* 0x02 */
+ uint16_t version_minor;
+ uint16_t binary_checksum; /* Byte sum of the binary after this field */
+ uint16_t binary_size; /* Binary Size*/
+ uint16_t num_tables;
+ uint16_t padding;
+ table_info table_list[] __counted_by(num_tables);
+} binary_header_v2;
+
typedef struct die_info
{
uint16_t die_id;
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index bdf8e6ff556c..a9b73f4fd466 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -584,6 +584,9 @@ enum amdgpu_metrics_attr_id {
AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC,
AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HBM,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_AID,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_XCD,
AMDGPU_METRICS_ATTR_ID_MAX,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index b32c053950c9..a8d63d4d1f6e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2222,7 +2222,8 @@ static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 32d5e2170d80..54a86eb77cd5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -49,6 +49,13 @@
#undef pr_info
#undef pr_debug
+#define hbm_stack_mask_valid(umc_mask) \
+ (((umc_mask) & 0x3) == 0x3)
+
+#define for_each_hbm_stack(stack_idx, umc_mask) \
+ for ((stack_idx) = 0; (umc_mask); \
+ (umc_mask) >>= 2, (stack_idx)++) \
+
#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \
[smu_feature] = { 1, (smu_13_0_12_feature) }
@@ -262,8 +269,9 @@ static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
int ret;
if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
- max_width = (uint8_t)static_metrics->MaxXgmiWidth;
- max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ max_width = (uint8_t)SMUQ10_ROUND(static_metrics->MaxXgmiWidth);
+ max_speed =
+ (uint16_t)SMUQ10_ROUND(static_metrics->MaxXgmiBitrate);
ret = 0;
} else {
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
@@ -834,7 +842,7 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
struct smu_v13_0_6_gpu_metrics *gpu_metrics)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i, j;
+ int ret = 0, xcc_id, inst, i, j, idx;
u8 num_jpeg_rings_gpu_metrics;
MetricsTable_t *metrics;
@@ -849,6 +857,31 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
gpu_metrics->temperature_vrsoc =
SMUQ10_ROUND(metrics->MaxVrTemperature);
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(TEMP_AID_XCD_HBM))) {
+ if (adev->umc.active_mask) {
+ u64 mask = adev->umc.active_mask;
+ int out_idx = 0;
+ int stack_idx;
+
+ if (unlikely(hweight64(mask) / 2 > SMU_13_0_6_MAX_HBM_STACKS)) {
+ dev_warn(adev->dev, "Invalid umc mask %lld\n", mask);
+ } else {
+ for_each_hbm_stack(stack_idx, mask) {
+ if (!hbm_stack_mask_valid(mask))
+ continue;
+ gpu_metrics->temperature_hbm[out_idx++] =
+ metrics->HbmTemperature[stack_idx];
+ }
+ }
+ }
+ idx = 0;
+ for_each_inst(i, adev->aid_mask) {
+ gpu_metrics->temperature_aid[idx] = metrics->AidTemperature[i];
+ idx++;
+ }
+ }
+
gpu_metrics->average_gfx_activity =
SMUQ10_ROUND(metrics->SocketGfxBusy);
gpu_metrics->average_umc_activity =
@@ -964,6 +997,9 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
[i] = SMUQ10_ROUND(
metrics->GfxclkBelowHostLimitTotalAcc[inst]);
}
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(TEMP_AID_XCD_HBM)))
+ gpu_metrics->temperature_xcd[i] = metrics->XcdTemperature[inst];
}
gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 2512a8ff6836..8dc8674b7ce1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -373,6 +373,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
} else {
smu_v13_0_12_tables_fini(smu);
}
+
+ if (fw_ver >= 0x04561000)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_AID_XCD_HBM));
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index ffb06564f830..a150fc88902c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -78,6 +78,7 @@ enum smu_v13_0_6_caps {
SMU_CAP(RAS_EEPROM),
SMU_CAP(FAST_PPT),
SMU_CAP(SYSTEM_POWER_METRICS),
+ SMU_CAP(TEMP_AID_XCD_HBM),
SMU_CAP(ALL),
};
@@ -87,6 +88,8 @@ enum smu_v13_0_6_caps {
#define SMU_13_0_6_MAX_XCC 8
#define SMU_13_0_6_MAX_VCN 4
#define SMU_13_0_6_MAX_JPEG 40
+#define SMU_13_0_6_MAX_AID 4
+#define SMU_13_0_6_MAX_HBM_STACKS 8
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
@@ -222,7 +225,15 @@ extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv;
SMU_13_0_6_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
- SMU_13_0_6_MAX_XCC);
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_HBM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hbm, \
+ SMU_13_0_6_MAX_HBM_STACKS); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_AID), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_aid, SMU_13_0_6_MAX_AID); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_XCD), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_xcd, SMU_13_0_6_MAX_XCC); \
+
DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS);
void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index f08cfa510a8a..5500a0f12f0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2224,7 +2224,8 @@ static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 9994d4369da8..73762d9b5969 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -2311,7 +2311,8 @@ static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 845c63ca15b5..c4adad77c8d6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -1771,3 +1772,32 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
+/**
+ * drm_fb_helper_gem_is_fb - Tests whether a GEM object is the framebuffer
+ * @fb_helper: fb_helper instance, can be NULL
+ * @obj: The GEM object to test, can be NULL
+ *
+ * Call drm_fb_helper_gem_is_fb to test whether a DRM device's fbdev emulation
+ * uses the specified GEM object for its framebuffer. The result is always
+ * false if either pointer is NULL.
+ *
+ * Returns:
+ * True if fbdev emulation uses the provided GEM object, or false otherwise.
+ */
+bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_object *gem = NULL;
+
+ if (!fb_helper || !obj)
+ return false;
+ if (fb_helper->buffer && fb_helper->buffer->gem)
+ gem = fb_helper->buffer->gem;
+ else if (fb_helper->fb)
+ gem = drm_gem_fb_get_obj(fb_helper->fb, 0);
+
+ return gem == obj;
+}
+EXPORT_SYMBOL_GPL(drm_fb_helper_gem_is_fb);
+
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5d523d5dae88..705c012fcf9e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -37,6 +37,7 @@
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
@@ -1574,7 +1575,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct radeon_bo *robj;
if (radeon_crtc->cursor_bo) {
struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
@@ -1588,9 +1588,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
- robj = gem_to_radeon_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
- if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
+ if (!drm_fb_helper_gem_is_fb(dev->fb_helper, fb->obj[0])) {
+ struct radeon_bo *robj = gem_to_radeon_bo(fb->obj[0]);
+
r = radeon_bo_reserve(robj, false);
if (r == 0) {
radeon_bo_unpin(robj);
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index 18d61f3f7344..3e243f5e2f44 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -274,20 +274,3 @@ err_radeon_fbdev_destroy_pinned_object:
radeon_fbdev_destroy_pinned_object(gobj);
return ret;
}
-
-bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
-{
- struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper;
- struct drm_gem_object *gobj;
-
- if (!fb_helper)
- return false;
-
- gobj = drm_gem_fb_get_obj(fb_helper->fb, 0);
- if (!gobj)
- return false;
- if (gobj != &robj->tbo.base)
- return false;
-
- return true;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 088af85902f7..ae1ecdc2e189 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -936,14 +936,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
#define RADEON_FBDEV_DRIVER_OPS \
.fbdev_probe = radeon_fbdev_driver_fbdev_probe
-bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
#else
#define RADEON_FBDEV_DRIVER_OPS \
.fbdev_probe = NULL
-static inline bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
-{
- return false;
-}
#endif
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 15274b8a1d97..bf391903443d 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -271,6 +271,8 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper,
+ const struct drm_gem_object *obj);
#endif
#endif