-rw-r--r--  Documentation/gpu/amdgpu/apu-asic-info-table.csv | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c | 210
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h | 60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 197
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 165
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc24.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/Makefile | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn401/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c | 280
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 169
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h | 18
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h | 110
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/isp/irqsrcs_isp_4_1.h | 62
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 30
135 files changed, 2408 insertions(+), 738 deletions(-)
diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv
index 18868abe2a91..5dd4b8762d19 100644
--- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv
+++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv
@@ -7,7 +7,9 @@ SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0
Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1
Ryzen 6000 series / Ryzen 7x35 series / Ryzen 7x36 series, YELLOW CARP / Rembrandt / Rembrandt-R, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3
Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
+Ryzen 9000 series (AM5), Granite Ridge, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
Ryzen 7x45 series (FL1), Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8
Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
+Ryzen AI 300 series, Strix Point, 3.5.0, 11.5.0, 4.0.5, 6.1.0, 14.0.0
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 4232ab27f990..0051fb1b437f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -71,6 +71,17 @@ config DRM_AMDGPU_USERPTR
This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it
isn't already selected to enable full userptr support.
+config DRM_AMD_ISP
+ bool "Enable AMD Image Signal Processor IP support"
+ depends on DRM_AMDGPU
+ select MFD_CORE
+ select PM_GENERIC_DOMAINS if PM
+ help
+	  Choose this option to enable ISP IP support for AMD SoCs.
+	  This adds the ISP (Image Signal Processor) IP driver and wires
+	  it up into the amdgpu driver. It is required for camera
+	  support on APUs that use MIPI cameras.
+
config DRM_AMDGPU_WERROR
bool "Force the compiler to throw an error instead of a warning when compiling"
depends on DRM_AMDGPU
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index dfd2d594e143..9dd8294032ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -323,4 +323,12 @@ amdgpu-y += $(AMD_DISPLAY_FILES)
endif
+# add isp block
+ifneq ($(CONFIG_DRM_AMD_ISP),)
+amdgpu-y += \
+ amdgpu_isp.o \
+ isp_v4_1_0.o \
+ isp_v4_1_1.o
+endif
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7dab4768cee6..137a88b8de45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -112,6 +112,9 @@
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
+#if defined(CONFIG_DRM_AMD_ISP)
+#include "amdgpu_isp.h"
+#endif
#define MAX_GPU_INSTANCE 64
@@ -221,7 +224,6 @@ extern int amdgpu_mes;
extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_uni_mes;
-extern int amdgpu_jpeg_test;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
@@ -721,6 +723,7 @@ enum amd_hw_ip_block_type {
XGMI_HWIP,
DCI_HWIP,
PCIE_HWIP,
+ ISP_HWIP,
MAX_HWIP
};
@@ -1018,7 +1021,6 @@ struct amdgpu_device {
/* jpeg */
struct amdgpu_jpeg jpeg;
- bool enable_jpeg_test;
/* vpe */
struct amdgpu_vpe vpe;
@@ -1048,6 +1050,11 @@ struct amdgpu_device {
/* display related functionality */
struct amdgpu_display_manager dm;
+#if defined(CONFIG_DRM_AMD_ISP)
+ /* isp */
+ struct amdgpu_isp isp;
+#endif
+
/* mes */
bool enable_mes;
bool enable_mes_kiq;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index f932bec6e534..f873dd3cae16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -433,7 +433,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * (1 << mem_channel_width);
+ *vram_width = mem_channel_number * 16;
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 183e219b6a85..b27336a05aae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5069,7 +5069,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
- amdgpu_virt_ready_to_reset(adev);
+ if (!amdgpu_ras_get_fed_status(adev))
+ amdgpu_virt_ready_to_reset(adev);
amdgpu_virt_wait_reset(adev);
clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
r = amdgpu_virt_request_full_gpu(adev, true);
@@ -5837,6 +5838,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
+ dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
+ amdgpu_ras_set_fed(adev, true);
+ set_bit(AMDGPU_HOST_FLR, &reset_context->flags);
+ }
+
r = amdgpu_device_reset_sriov(adev, reset_context);
if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
amdgpu_virt_release_full_gpu(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 0cb8aea93a70..90475ddf1c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -106,6 +106,9 @@
#include "jpeg_v5_0_0.h"
#include "amdgpu_vpe.h"
+#if defined(CONFIG_DRM_AMD_ISP)
+#include "amdgpu_isp.h"
+#endif
#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
@@ -225,6 +228,7 @@ static int hw_id_map[MAX_HWIP] = {
[DCI_HWIP] = DCI_HWID,
[PCIE_HWIP] = PCIE_HWID,
[VPE_HWIP] = VPE_HWID,
+ [ISP_HWIP] = ISP_HWID,
};
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
@@ -711,6 +715,12 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
adev->sdma.sdma_mask &=
~(1U << harvest_info->list[i].number_instance);
break;
+#if defined(CONFIG_DRM_AMD_ISP)
+ case ISP_HWID:
+		adev->isp.harvest_config |=
+			1U << harvest_info->list[i].number_instance;
+ break;
+#endif
default:
break;
}
@@ -2294,8 +2304,6 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(5, 0, 0):
amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
- if (amdgpu_jpeg_test)
- adev->enable_jpeg_test = true;
break;
default:
dev_err(adev->dev,
@@ -2378,6 +2386,24 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DRM_AMD_ISP)
+ switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
+ break;
+ case IP_VERSION(4, 1, 1):
+ amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
+ break;
+ default:
+ break;
+ }
+#endif
+
+ return 0;
+}
+
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
@@ -2904,6 +2930,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
+ r = amdgpu_discovery_set_isp_ip_blocks(adev);
+ if (r)
+ return r;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9f2db858c6e0..78089f2f79f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -204,7 +204,6 @@ int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
-int amdgpu_jpeg_test;
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
@@ -940,9 +939,6 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
module_param_named(reset_method, amdgpu_reset_method, int, 0644);
-MODULE_PARM_DESC(jpeg_test, "jpeg test(0 = disable (default), 1 = enable)");
-module_param_named(jpeg_test, amdgpu_jpeg_test, int, 0444);
-
/**
 * DOC: bad_page_threshold (int) Bad page threshold specifies the
* threshold value of faulty pages detected by RAS ECC, which may
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 46889bfe5669..aad2027e5c7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -343,11 +343,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
return -EINVAL;
- if ((flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
- ((amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) ||
- !(args->in.domains & AMDGPU_GEM_DOMAIN_VRAM)))
- return -EINVAL;
-
if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 322b8ff67cde..3a7622611916 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -718,7 +718,11 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
ndw += kiq->pmf->invalidate_tlbs_size;
spin_lock(&adev->gfx.kiq[inst].ring_lock);
- amdgpu_ring_alloc(ring, ndw);
+ r = amdgpu_ring_alloc(ring, ndw);
+ if (r) {
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ goto error_unlock_reset;
+ }
if (adev->gmc.flush_tlb_needs_extra_type_2)
kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 013ff373e067..19ce4da285e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -466,7 +466,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+ } else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
+ (client_id == SOC15_IH_CLIENTID_ISP)) &&
adev->irq.virq[src_id]) {
generic_handle_domain_irq(adev->irq.domain, src_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
new file mode 100644
index 000000000000..4766e99dd98f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mfd/core.h>
+
+#include "amdgpu.h"
+#include "amdgpu_isp.h"
+#include "isp_v4_1_0.h"
+#include "isp_v4_1_1.h"
+
+static int isp_sw_init(void *handle)
+{
+ return 0;
+}
+
+static int isp_sw_fini(void *handle)
+{
+ return 0;
+}
+
+/**
+ * isp_hw_init - start and test isp block
+ *
+ * @handle: handle for amdgpu_device pointer
+ *
+ */
+static int isp_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ const struct amdgpu_ip_block *ip_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ISP);
+
+ if (!ip_block)
+ return -EINVAL;
+
+ if (isp->funcs->hw_init != NULL)
+ return isp->funcs->hw_init(isp);
+
+ return -ENODEV;
+}
+
+/**
+ * isp_hw_fini - stop the hardware block
+ *
+ * @handle: handle for amdgpu_device pointer
+ *
+ */
+static int isp_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ if (isp->funcs->hw_fini != NULL)
+ return isp->funcs->hw_fini(isp);
+
+ return -ENODEV;
+}
+
+static int isp_suspend(void *handle)
+{
+ return 0;
+}
+
+static int isp_resume(void *handle)
+{
+ return 0;
+}
+
+static int isp_load_fw_by_psp(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *hdr;
+ char ucode_prefix[10];
+ int r = 0;
+
+ /* get isp fw binary name and path */
+ amdgpu_ucode_ip_version_decode(adev, ISP_HWIP, ucode_prefix,
+ sizeof(ucode_prefix));
+
+ /* read isp fw */
+ r = amdgpu_ucode_request(adev, &adev->isp.fw, "amdgpu/%s.bin", ucode_prefix);
+ if (r) {
+ amdgpu_ucode_release(&adev->isp.fw);
+ return r;
+ }
+
+ hdr = (const struct common_firmware_header *)adev->isp.fw->data;
+
+ adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].ucode_id =
+ AMDGPU_UCODE_ID_ISP;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].fw = adev->isp.fw;
+
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+
+ return r;
+}
+
+static int isp_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ isp_v4_1_0_set_isp_funcs(isp);
+ break;
+ case IP_VERSION(4, 1, 1):
+ isp_v4_1_1_set_isp_funcs(isp);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ isp->adev = adev;
+ isp->parent = adev->dev;
+
+ if (isp_load_fw_by_psp(adev)) {
+ DRM_DEBUG_DRIVER("%s: isp fw load failed\n", __func__);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static bool isp_is_idle(void *handle)
+{
+ return true;
+}
+
+static int isp_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int isp_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static int isp_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int isp_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static const struct amd_ip_funcs isp_ip_funcs = {
+ .name = "isp_ip",
+ .early_init = isp_early_init,
+ .late_init = NULL,
+ .sw_init = isp_sw_init,
+ .sw_fini = isp_sw_fini,
+ .hw_init = isp_hw_init,
+ .hw_fini = isp_hw_fini,
+ .suspend = isp_suspend,
+ .resume = isp_resume,
+ .is_idle = isp_is_idle,
+ .wait_for_idle = isp_wait_for_idle,
+ .soft_reset = isp_soft_reset,
+ .set_clockgating_state = isp_set_clockgating_state,
+ .set_powergating_state = isp_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version isp_v4_1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_ISP,
+ .major = 4,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &isp_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version isp_v4_1_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_ISP,
+ .major = 4,
+ .minor = 1,
+ .rev = 1,
+ .funcs = &isp_ip_funcs,
+};
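
The generic handlers above never touch version-specific registers; everything
dispatches through isp->funcs. As a hedged sketch (the real bodies live in
isp_v4_1_0.c and isp_v4_1_1.c further down), a backend is expected to wire
itself in roughly like this:

	/* illustrative only: expected shape of a version-specific backend */
	static const struct isp_funcs isp_v4_1_0_funcs = {
		.hw_init = isp_v4_1_0_hw_init,
		.hw_fini = isp_v4_1_0_hw_fini,
	};

	void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp)
	{
		isp->funcs = &isp_v4_1_0_funcs;
	}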
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
new file mode 100644
index 000000000000..44e2ea8c9728
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#ifndef __AMDGPU_ISP_H__
+#define __AMDGPU_ISP_H__
+
+#define ISP_REGS_OFFSET_END 0x629A4
+
+struct amdgpu_isp;
+
+struct isp_platform_data {
+ void *adev;
+ u32 asic_type;
+ resource_size_t base_rmmio_size;
+};
+
+struct isp_funcs {
+ int (*hw_init)(struct amdgpu_isp *isp);
+ int (*hw_fini)(struct amdgpu_isp *isp);
+};
+
+struct amdgpu_isp {
+ struct device *parent;
+ struct amdgpu_device *adev;
+ const struct isp_funcs *funcs;
+ struct mfd_cell *isp_cell;
+ struct resource *isp_res;
+ struct isp_platform_data *isp_pdata;
+ unsigned int harvest_config;
+ const struct firmware *fw;
+};
+
+extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block;
+extern const struct amdgpu_ip_block_version isp_v4_1_1_ip_block;
+
+#endif /* __AMDGPU_ISP_H__ */
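
The Kconfig hunk selects MFD_CORE, and the isp_cell/isp_res members above
suggest the hw_init callback exposes the ISP to the camera stack as an MFD
sub-device of the GPU. A minimal sketch of that registration, assuming a
hypothetical cell name and a pre-built resource table:

	/* assumption: cell name and resource layout are illustrative */
	isp->isp_pdata->adev = (void *)adev;
	isp->isp_pdata->asic_type = adev->asic_type;
	isp->isp_pdata->base_rmmio_size = adev->rmmio_size;

	isp->isp_cell->name = "amd_isp_capture";
	isp->isp_cell->num_resources = 1;
	isp->isp_cell->resources = isp->isp_res;
	isp->isp_cell->platform_data = isp->isp_pdata;
	isp->isp_cell->pdata_size = sizeof(struct isp_platform_data);

	r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);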
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index aea31d61d991..f9cdd873ac9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -60,6 +60,37 @@
RREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_DATA); \
})
+#define WREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, value, indirect) \
+ do { \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_MASK, 0xFFFFFFFF); \
+ WREG32_SOC15( \
+ JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_CTL, \
+ (UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \
+ indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } while (0)
+
+#define RREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, mask_en) \
+ do { \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_MASK, 0xFFFFFFFF); \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_CTL, \
+ (UVD_DPG_LMA_CTL__MASK_EN_MASK | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(JPEG, inst_idx, regUVD_DPG_LMA_DATA); \
+ } while (0)
+
+#define ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, offset, value, indirect) \
+ do { \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value; \
+ } while (0)
+
struct amdgpu_jpeg_reg{
unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
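
These macros mirror the existing SOC15 DPG helpers: a write either goes out
immediately through the UVD_DPG_LMA_* aperture or, when staging a DPG
power-gate sequence, is appended to the per-instance SRAM buffer as an
offset/value pair. A hedged usage sketch (names follow the macro bodies
above, not a new API):

	/* illustrative: stage to DPG SRAM when indirect, else program via LMA */
	if (indirect)
		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, offset, value, indirect);
	else
		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, value, 0);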
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index a1b7438c43dc..e32161f6b67a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1599,36 +1599,39 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
u64 size;
if (dma_resv_trylock(bo->tbo.base.resv)) {
-
- switch (bo->tbo.resource->mem_type) {
- case TTM_PL_VRAM:
- if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
- placement = "VRAM VISIBLE";
- else
- placement = "VRAM";
- break;
- case TTM_PL_TT:
- placement = "GTT";
- break;
- case AMDGPU_PL_GDS:
- placement = "GDS";
- break;
- case AMDGPU_PL_GWS:
- placement = "GWS";
- break;
- case AMDGPU_PL_OA:
- placement = "OA";
- break;
- case AMDGPU_PL_PREEMPT:
- placement = "PREEMPTIBLE";
- break;
- case AMDGPU_PL_DOORBELL:
- placement = "DOORBELL";
- break;
- case TTM_PL_SYSTEM:
- default:
- placement = "CPU";
- break;
+ if (!bo->tbo.resource) {
+ placement = "NONE";
+ } else {
+ switch (bo->tbo.resource->mem_type) {
+ case TTM_PL_VRAM:
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ placement = "VRAM VISIBLE";
+ else
+ placement = "VRAM";
+ break;
+ case TTM_PL_TT:
+ placement = "GTT";
+ break;
+ case AMDGPU_PL_GDS:
+ placement = "GDS";
+ break;
+ case AMDGPU_PL_GWS:
+ placement = "GWS";
+ break;
+ case AMDGPU_PL_OA:
+ placement = "OA";
+ break;
+ case AMDGPU_PL_PREEMPT:
+ placement = "PREEMPTIBLE";
+ break;
+ case AMDGPU_PL_DOORBELL:
+ placement = "DOORBELL";
+ break;
+ case TTM_PL_SYSTEM:
+ default:
+ placement = "CPU";
+ break;
+ }
}
dma_resv_unlock(bo->tbo.base.resv);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index f89de056a828..e15814d9ca17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2559,6 +2559,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_JPEG_RAM:
*type = GFX_FW_TYPE_JPEG_RAM;
break;
+ case AMDGPU_UCODE_ID_ISP:
+ *type = GFX_FW_TYPE_ISP;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 68e9935028db..4edd8e333d36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -120,7 +120,7 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
-#define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms
#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
@@ -1384,10 +1384,17 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
memset(&qctx, 0, sizeof(qctx));
qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
+
+ if (!down_read_trylock(&adev->reset_domain->sem)) {
+ ret = -EIO;
+ goto out_fini_err_data;
+ }
+
ret = amdgpu_ras_query_error_status_helper(adev, info,
&err_data,
&qctx,
error_query_mode);
+ up_read(&adev->reset_domain->sem);
if (ret)
goto out_fini_err_data;
@@ -2105,10 +2112,8 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj
if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
- amdgpu_ras_put_poison_req(obj->adev,
- AMDGPU_RAS_BLOCK__UMC, 0, NULL, NULL, false);
-
atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_creation_count);
wake_up(&con->page_retirement_wq);
}
@@ -2799,7 +2804,8 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key));
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
- ecc_log->de_updated = false;
+ ecc_log->de_queried_count = 0;
+ ecc_log->prev_de_queried_count = 0;
}
static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
@@ -2818,7 +2824,8 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
mutex_unlock(&ecc_log->lock);
mutex_destroy(&ecc_log->lock);
- ecc_log->de_updated = false;
+ ecc_log->de_queried_count = 0;
+ ecc_log->prev_de_queried_count = 0;
}
static void amdgpu_ras_do_page_retirement(struct work_struct *work)
@@ -2850,60 +2857,116 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
mutex_unlock(&con->umc_ecc_log.lock);
}
-static void amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
- uint32_t timeout_ms)
+static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
+ uint32_t poison_creation_count)
{
int ret = 0;
struct ras_ecc_log_info *ecc_log;
struct ras_query_if info;
- uint32_t timeout = timeout_ms;
+ uint32_t timeout = 0;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t de_queried_count;
+ uint32_t new_detect_count, total_detect_count;
+ uint32_t need_query_count = poison_creation_count;
+ bool query_data_timeout = false;
memset(&info, 0, sizeof(info));
info.head.block = AMDGPU_RAS_BLOCK__UMC;
ecc_log = &ras->umc_ecc_log;
- ecc_log->de_updated = false;
+ total_detect_count = 0;
do {
ret = amdgpu_ras_query_error_status(adev, &info);
- if (ret) {
- dev_err(adev->dev, "Failed to query ras error! ret:%d\n", ret);
- return;
+ if (ret)
+ return ret;
+
+ de_queried_count = ecc_log->de_queried_count;
+ if (de_queried_count > ecc_log->prev_de_queried_count) {
+ new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
+ ecc_log->prev_de_queried_count = de_queried_count;
+ timeout = 0;
+ } else {
+ new_detect_count = 0;
}
- if (timeout && !ecc_log->de_updated) {
- msleep(1);
- timeout--;
+ if (new_detect_count) {
+ total_detect_count += new_detect_count;
+ } else {
+ if (!timeout && need_query_count)
+ timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+
+ if (timeout) {
+ if (!--timeout) {
+ query_data_timeout = true;
+ break;
+ }
+ msleep(1);
+ }
}
- } while (timeout && !ecc_log->de_updated);
+ } while (total_detect_count < need_query_count);
- if (timeout_ms && !timeout) {
- dev_warn(adev->dev, "Can't find deferred error\n");
- return;
+ if (query_data_timeout) {
+ dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
+ (need_query_count - total_detect_count));
+ return -ENOENT;
}
- if (!ret)
+ if (total_detect_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
+
+ return 0;
+}
+
+static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_poison_msg msg;
+ int ret;
+
+ do {
+ ret = kfifo_get(&con->poison_fifo, &msg);
+ } while (ret);
}
static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
- struct ras_poison_msg *poison_msg)
+ uint32_t msg_count, uint32_t *gpu_reset)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- uint32_t reset = poison_msg->reset;
- uint16_t pasid = poison_msg->pasid;
+ uint32_t reset_flags = 0, reset = 0;
+ struct ras_poison_msg msg;
+ int ret, i;
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (poison_msg->pasid_fn)
- poison_msg->pasid_fn(adev, pasid, poison_msg->data);
+ for (i = 0; i < msg_count; i++) {
+ ret = amdgpu_ras_get_poison_req(adev, &msg);
+ if (!ret)
+ continue;
+
+ if (msg.pasid_fn)
+ msg.pasid_fn(adev, msg.pasid, msg.data);
+
+ reset_flags |= msg.reset;
+ }
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
- if (reset && !con->is_rma) {
+ if (reset_flags && !con->is_rma) {
+ if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ else
+ reset = reset_flags;
+
flush_delayed_work(&con->page_retirement_dwork);
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
+
+ *gpu_reset = reset;
+
+ /* Wait for gpu recovery to complete */
+ flush_work(&con->recovery_work);
}
return 0;
@@ -2913,9 +2976,9 @@ static int amdgpu_ras_page_retirement_thread(void *param)
{
struct amdgpu_device *adev = (struct amdgpu_device *)param;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_poison_msg poison_msg;
- enum amdgpu_ras_block ras_block;
- bool poison_creation_is_handled = false;
+ uint32_t poison_creation_count, msg_count;
+ uint32_t gpu_reset;
+ int ret;
while (!kthread_should_stop()) {
@@ -2926,33 +2989,61 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
- atomic_dec(&con->page_retirement_req_cnt);
+ gpu_reset = 0;
- if (!amdgpu_ras_get_poison_req(adev, &poison_msg))
- continue;
+ do {
+ poison_creation_count = atomic_read(&con->poison_creation_count);
+ ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
+ if (ret == -EIO)
+ break;
- ras_block = poison_msg.block;
+ if (poison_creation_count) {
+ atomic_sub(poison_creation_count, &con->poison_creation_count);
+ atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
+ }
+ } while (atomic_read(&con->poison_creation_count));
+
+ if (ret != -EIO) {
+ msg_count = kfifo_len(&con->poison_fifo);
+ if (msg_count) {
+ ret = amdgpu_ras_poison_consumption_handler(adev,
+ msg_count, &gpu_reset);
+ if ((ret != -EIO) &&
+ (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
+ atomic_sub(msg_count, &con->page_retirement_req_cnt);
+ }
+ }
- dev_dbg(adev->dev, "Start processing ras block %s(%d)\n",
- ras_block_str(ras_block), ras_block);
+ if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
+			/* gpu mode-1 reset is ongoing, or a RAS mode-1 reset has just completed */
+ /* Clear poison creation request */
+ atomic_set(&con->poison_creation_count, 0);
- if (ras_block == AMDGPU_RAS_BLOCK__UMC) {
- amdgpu_ras_poison_creation_handler(adev,
- MAX_UMC_POISON_POLLING_TIME_ASYNC);
- poison_creation_is_handled = true;
- } else {
- /* poison_creation_is_handled:
- * false: no poison creation interrupt, but it has poison
- * consumption interrupt.
- * true: It has poison creation interrupt at the beginning,
- * but it has no poison creation interrupt later.
- */
- amdgpu_ras_poison_creation_handler(adev,
- poison_creation_is_handled ?
- 0 : MAX_UMC_POISON_POLLING_TIME_ASYNC);
+ /* Clear poison fifo */
+ amdgpu_ras_clear_poison_fifo(adev);
+
+ /* Clear all poison requests */
+ atomic_set(&con->page_retirement_req_cnt, 0);
+
+ if (ret == -EIO) {
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ /* Wake up work to save bad pages to eeprom */
+ schedule_delayed_work(&con->page_retirement_dwork, 0);
+ } else if (gpu_reset) {
+ /* gpu just completed mode-2 reset or other reset */
+ /* Clear poison consumption messages cached in fifo */
+ msg_count = kfifo_len(&con->poison_fifo);
+ if (msg_count) {
+ amdgpu_ras_clear_poison_fifo(adev);
+ atomic_sub(msg_count, &con->page_retirement_req_cnt);
+ }
- amdgpu_ras_poison_consumption_handler(adev, &poison_msg);
- poison_creation_is_handled = false;
+ /* Wake up work to save bad pages to eeprom */
+ schedule_delayed_work(&con->page_retirement_dwork, 0);
}
}
@@ -3026,6 +3117,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
mutex_init(&con->page_retirement_lock);
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
+ atomic_set(&con->poison_creation_count, 0);
con->page_retirement_thread =
kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
if (IS_ERR(con->page_retirement_thread)) {
@@ -3074,6 +3166,7 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
kthread_stop(con->page_retirement_thread);
atomic_set(&con->page_retirement_req_cnt, 0);
+ atomic_set(&con->poison_creation_count, 0);
mutex_destroy(&con->page_rsv_lock);
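
The retirement thread above relies on poison_creation_count always being a
subset of page_retirement_req_cnt: every request bumps the total, creation
requests additionally bump the creation counter, and whatever remains after
draining creations must be consumption messages sitting in poison_fifo. A
standalone model of that accounting (not driver code; plain C11 atomics stand
in for the kernel's atomic_t):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int page_retirement_req_cnt;
	static atomic_int poison_creation_count;

	static void poison_created(void)   /* UMC poison-creation interrupt */
	{
		atomic_fetch_add(&page_retirement_req_cnt, 1);
		atomic_fetch_add(&poison_creation_count, 1);
	}

	static void poison_consumed(void)  /* consumption interrupt -> fifo */
	{
		atomic_fetch_add(&page_retirement_req_cnt, 1);
	}

	int main(void)
	{
		poison_created();
		poison_created();
		poison_consumed();

		/* consumer: drain creations first, as the thread does */
		int creations = atomic_exchange(&poison_creation_count, 0);
		atomic_fetch_sub(&page_retirement_req_cnt, creations);

		/* what is left corresponds to messages in poison_fifo */
		printf("fifo messages: %d\n",
		       atomic_load(&page_retirement_req_cnt));
		return 0;
	}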
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 83437fef9df5..0fa1148e6642 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -469,7 +469,8 @@ struct ras_ecc_log_info {
struct mutex lock;
siphash_key_t ecc_key;
struct radix_tree_root de_page_tree;
- bool de_updated;
+ uint64_t de_queried_count;
+ uint64_t prev_de_queried_count;
};
struct amdgpu_ras {
@@ -531,6 +532,7 @@ struct amdgpu_ras {
wait_queue_head_t page_retirement_wq;
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
+ atomic_t poison_creation_count;
struct mutex page_rsv_lock;
DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
struct ras_ecc_log_info umc_ecc_log;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3588f1c5a007..4c7b53648a50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -714,6 +714,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
return "RS64_MEC_P2_STACK";
case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
return "RS64_MEC_P3_STACK";
+ case AMDGPU_UCODE_ID_ISP:
+ return "ISP";
default:
return "UNKNOWN UCODE";
}
@@ -1413,6 +1415,9 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
case VPE_HWIP:
ip_name = "vpe";
break;
+ case ISP_HWIP:
+ ip_name = "isp";
+ break;
default:
BUG();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index f4e5285c4dd6..5bc37acd3981 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -523,6 +523,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER,
AMDGPU_UCODE_ID_P2S_TABLE,
AMDGPU_UCODE_ID_JPEG_RAM,
+ AMDGPU_UCODE_ID_ISP,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 20e0e522fb51..2f84bdb8c594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -293,14 +293,15 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
amdgpu_ras_error_data_fini(&err_data);
} else {
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-
- amdgpu_ras_put_poison_req(adev,
- block, pasid, pasid_fn, data, reset);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+ ret = amdgpu_ras_put_poison_req(adev,
+ block, pasid, pasid_fn, data, reset);
+ if (!ret) {
atomic_inc(&con->page_retirement_req_cnt);
-
wake_up(&con->page_retirement_wq);
+ }
}
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 63f2286858c4..ccb3d041c2b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -229,6 +229,22 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
adev->virt.mm_table.gpu_addr = 0;
}
+/**
+ * amdgpu_virt_rcvd_ras_interrupt() - check for a host RAS interrupt
+ * @adev: amdgpu device.
+ *
+ * Check whether the host has sent a RAS error message.
+ *
+ * Return: true if one was received, otherwise false
+ */
+bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (!virt->ops || !virt->ops->rcvd_ras_intr)
+ return false;
+
+ return virt->ops->rcvd_ras_intr(adev);
+}
+
unsigned int amd_sriov_msg_checksum(void *obj,
unsigned long obj_size,
@@ -612,11 +628,14 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
ret = amdgpu_virt_read_pf2vf_data(adev);
if (ret) {
adev->virt.vf2pf_update_retry_cnt++;
- if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
- amdgpu_sriov_runtime(adev)) {
+
+ if ((amdgpu_virt_rcvd_ras_interrupt(adev) ||
+ adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
+ amdgpu_sriov_runtime(adev)) {
+
amdgpu_ras_set_fed(adev, true);
if (amdgpu_reset_domain_schedule(adev->reset_domain,
- &adev->kfd.reset_work))
+ &adev->kfd.reset_work))
return;
else
dev_err(adev->dev, "Failed to queue work! at %s", __func__);
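
Each virtualization backend supplies the new rcvd_ras_intr hook (mxgpu_ai.c
and mxgpu_nv.c are both touched by this patch, in hunks not shown here). A
hedged sketch of the expected shape; the mailbox helper and event name are
hypothetical stand-ins:

	static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
	{
		/* hypothetical: peek the host mailbox for a fatal-RAS
		 * notification without consuming the message */
		return xgpu_nv_mailbox_peek_msg(adev) == IDH_RAS_ERROR_DETECTED;
	}

The backend's amdgpu_virt_ops table then sets
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr.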
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f04cd1586c72..b42a8854dca0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -52,7 +52,7 @@
/* tonga/fiji use this offset */
#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
-#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 5
+#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2
enum amdgpu_sriov_vf_mode {
SRIOV_VF_MODE_BARE_METAL = 0,
@@ -94,6 +94,7 @@ struct amdgpu_virt_ops {
u32 data1, u32 data2, u32 data3);
void (*ras_poison_handler)(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
+ bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
};
/*
@@ -352,6 +353,7 @@ void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
+bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 2a510351dfce..5c17409439f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -611,10 +611,9 @@ static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *
{
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- char fw_name[40];
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ "amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -653,7 +652,6 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
- char fw_name[40];
char ucode_prefix[25];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
@@ -663,9 +661,8 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
/* check pfp fw hdr version to decide if enable rs64 for gfx11.*/
@@ -681,8 +678,8 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
if (adev->gfx.rs64_enable) {
@@ -696,10 +693,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
adev->pdev->revision == 0xCE)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/gc_11_0_0_rlc_1.bin");
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/gc_11_0_0_rlc_1.bin");
else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -710,8 +708,8 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
if (adev->gfx.rs64_enable) {
@@ -4500,11 +4498,11 @@ static int gfx_v11_0_hw_init(void *handle)
/* RLC autoload sequence 1: Program rlc ram */
if (adev->gfx.imu.funcs->program_rlc_ram)
adev->gfx.imu.funcs->program_rlc_ram(adev);
+ /* rlc autoload firmware */
+ r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
+ if (r)
+ return r;
}
- /* rlc autoload firmware */
- r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
- if (r)
- return r;
} else {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 460bf33a22b1..e9559bdd8264 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -386,10 +386,9 @@ static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *
{
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- char fw_name[40];
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ "amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -407,7 +406,6 @@ out:
static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char fw_name[40];
char ucode_prefix[15];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
@@ -418,23 +416,23 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
if (!amdgpu_sriov_vf(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -445,8 +443,8 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 8d8763ebe027..1149595a02d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -55,6 +55,14 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+#define XCC_REG_RANGE_0_LOW 0x2000 /* XCC gfxdec0 lower Bound */
+#define XCC_REG_RANGE_0_HIGH 0x3400 /* XCC gfxdec0 upper Bound */
+#define XCC_REG_RANGE_1_LOW 0xA000 /* XCC gfxdec1 lower Bound */
+#define XCC_REG_RANGE_1_HIGH 0x10000 /* XCC gfxdec1 upper Bound */
+
+#define NORMALIZE_XCC_REG_OFFSET(offset) \
+ (offset & 0xFFFF)
+
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
@@ -217,9 +225,24 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
}
}
+static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
+{
+ uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);
+
+	/* If it is an XCC reg, normalize the reg to keep
+	 * the lower 16 bits in the local XCC */
+
+ if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
+ ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
+ return normalized_reg;
+ else
+ return reg;
+}
+
static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
bool wc, uint32_t reg, uint32_t val)
{
+ reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
WRITE_DATA_DST_SEL(0) |
@@ -234,6 +257,12 @@ static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
uint32_t addr1, uint32_t ref, uint32_t mask,
uint32_t inv)
{
+ /* Only do the normalization on regspace */
+ if (mem_space == 0) {
+ addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
+ addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
+ }
+
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring,
/* memory (1) or register (0) */
@@ -2725,6 +2754,8 @@ static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
{
struct amdgpu_device *adev = ring->adev;
+ reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
+
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
(5 << 8) | /* dst: memory */
@@ -2742,6 +2773,8 @@ static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
{
uint32_t cmd = 0;
+ reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
+
switch (ring->funcs->type) {
case AMDGPU_RING_TYPE_GFX:
cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
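
A worked example of the normalization above (illustrative offsets): 0x4A100
masks to 0xA100, which falls inside [XCC_REG_RANGE_1_LOW,
XCC_REG_RANGE_1_HIGH), so the packet is emitted with the local-XCC offset
0xA100; 0x41000 masks to 0x1000, which lies in neither gfxdec range, so the
original offset is kept:

	/* sketch of the decision gfx_v9_4_3_normalize_xcc_reg_offset() makes */
	assert(gfx_v9_4_3_normalize_xcc_reg_offset(0x4A100) == 0xA100);
	assert(gfx_v9_4_3_normalize_xcc_reg_offset(0x41000) == 0x41000);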
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index e14acab5cceb..72109abe7c86 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -629,9 +629,11 @@ static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS);
fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
- /* reset page fault status */
- WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
- regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* clear page fault status and address */
+ WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+ regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
+ }
return fed;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index be78507ec0d8..fd3ac483760e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -464,10 +464,6 @@ static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 88b4644f8e96..b73136d390cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -672,7 +672,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
(amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
return 0;
- WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 3cb64c8f7175..18a761d6ef33 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+
+ if (enable) {
+ /* Unset the CLEAR_OVERFLOW bit to make sure the next step
+ * is switching the bit from 0 to 1
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Clear RB_OVERFLOW bit */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ }
+
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
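The three writes above form an explicit 0-1-0 edge: WPTR_OVERFLOW_CLEAR latches on a rising edge, so the bit is forced low first, raised to perform the clear, then dropped so the next overflow can be observed. Condensed to the non-SRIOV path (names as in the hunk):

	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); /* known-low */
	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); /* rising edge clears RB_OVERFLOW */
	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); /* re-arm for the next overflow */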
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 0fbf5fa7b0f8..2e0469feca1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -535,6 +535,12 @@ static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev)
static int ih_v6_1_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ ret = amdgpu_irq_add_domain(adev);
+ if (ret)
+ return ret;
+

ih_v6_1_set_interrupt_funcs(adev);
ih_v6_1_set_self_irq_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
new file mode 100644
index 000000000000..aac107898bae
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include "amdgpu.h"
+#include "isp_v4_1_0.h"
+
+static const unsigned int isp_4_1_0_int_srcid[MAX_ISP410_INT_SRC] = {
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT11,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT12,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT13,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT14,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT15,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16
+};
+
+static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
+{
+ struct amdgpu_device *adev = isp->adev;
+ u64 isp_base;
+ int int_idx;
+ int r;
+
+ if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
+ return -EINVAL;
+
+ isp_base = adev->rmmio_base;
+
+ isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL);
+ if (!isp->isp_cell) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_res = kcalloc(MAX_ISP410_INT_SRC + 1, sizeof(struct resource),
+ GFP_KERNEL);
+ if (!isp->isp_res) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
+ if (!isp->isp_pdata) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp platform data alloc failed\n", __func__);
+ goto failure;
+ }
+
+ /* initialize isp platform data */
+ isp->isp_pdata->adev = (void *)adev;
+ isp->isp_pdata->asic_type = adev->asic_type;
+ isp->isp_pdata->base_rmmio_size = adev->rmmio_size;
+
+ isp->isp_res[0].name = "isp_4_1_0_reg";
+ isp->isp_res[0].flags = IORESOURCE_MEM;
+ isp->isp_res[0].start = isp_base;
+ isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END;
+
+ for (int_idx = 0; int_idx < MAX_ISP410_INT_SRC; int_idx++) {
+ isp->isp_res[int_idx + 1].name = "isp_4_1_0_irq";
+ isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ;
+ isp->isp_res[int_idx + 1].start =
+ amdgpu_irq_create_mapping(adev, isp_4_1_0_int_srcid[int_idx]);
+ isp->isp_res[int_idx + 1].end =
+ isp->isp_res[int_idx + 1].start;
+ }
+
+ isp->isp_cell[0].name = "amd_isp_capture";
+ isp->isp_cell[0].num_resources = MAX_ISP410_INT_SRC + 1;
+ isp->isp_cell[0].resources = &isp->isp_res[0];
+ isp->isp_cell[0].platform_data = isp->isp_pdata;
+ isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);
+ if (r) {
+ DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
+ goto failure;
+ }
+
+ return 0;
+
+failure:
+
+ kfree(isp->isp_pdata);
+ kfree(isp->isp_res);
+ kfree(isp->isp_cell);
+
+ return r;
+}
+
+static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp)
+{
+ mfd_remove_devices(isp->parent);
+
+ kfree(isp->isp_res);
+ kfree(isp->isp_cell);
+ kfree(isp->isp_pdata);
+
+ return 0;
+}
+
+static const struct isp_funcs isp_v4_1_0_funcs = {
+ .hw_init = isp_v4_1_0_hw_init,
+ .hw_fini = isp_v4_1_0_hw_fini,
+};
+
+void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp)
+{
+ isp->funcs = &isp_v4_1_0_funcs;
+}
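For context, a hypothetical sketch of the consumer side: the "amd_isp_capture" platform driver bound to this MFD cell would pick up the register window and the eight ring-buffer IRQs through the standard resource accessors. The probe function below is illustrative and not part of this patch:

static int amd_isp_capture_probe_sketch(struct platform_device *pdev)
{
	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct isp_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int irq = platform_get_irq(pdev, 0);	/* first ringbuffer WPT interrupt */

	if (!regs || !pdata || irq < 0)
		return -ENODEV;

	/* map regs->start..regs->end, request the IRQs, etc. */
	return 0;
}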
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
new file mode 100644
index 000000000000..315f2822410c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#ifndef __ISP_V4_1_0_H__
+#define __ISP_V4_1_0_H__
+
+#include "amdgpu_isp.h"
+
+#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
+
+#define MAX_ISP410_INT_SRC 8
+
+void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
new file mode 100644
index 000000000000..4e17fa03f7b5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include "amdgpu.h"
+#include "isp_v4_1_1.h"
+
+static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT11,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT12,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT13,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT14,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT15,
+ ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16
+};
+
+static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
+{
+ struct amdgpu_device *adev = isp->adev;
+ u64 isp_base;
+ int int_idx;
+ int r;
+
+ if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
+ return -EINVAL;
+
+ isp_base = adev->rmmio_base;
+
+ isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL);
+ if (!isp->isp_cell) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_res = kcalloc(MAX_ISP411_INT_SRC + 1, sizeof(struct resource),
+ GFP_KERNEL);
+ if (!isp->isp_res) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
+ if (!isp->isp_pdata) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp platform data alloc failed\n", __func__);
+ goto failure;
+ }
+
+ /* initialize isp platform data */
+ isp->isp_pdata->adev = (void *)adev;
+ isp->isp_pdata->asic_type = adev->asic_type;
+ isp->isp_pdata->base_rmmio_size = adev->rmmio_size;
+
+ isp->isp_res[0].name = "isp_4_1_1_reg";
+ isp->isp_res[0].flags = IORESOURCE_MEM;
+ isp->isp_res[0].start = isp_base;
+ isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END;
+
+ for (int_idx = 0; int_idx < MAX_ISP411_INT_SRC; int_idx++) {
+ isp->isp_res[int_idx + 1].name = "isp_4_1_1_irq";
+ isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ;
+ isp->isp_res[int_idx + 1].start =
+ amdgpu_irq_create_mapping(adev, isp_4_1_1_int_srcid[int_idx]);
+ isp->isp_res[int_idx + 1].end =
+ isp->isp_res[int_idx + 1].start;
+ }
+
+ isp->isp_cell[0].name = "amd_isp_capture";
+ isp->isp_cell[0].num_resources = MAX_ISP411_INT_SRC + 1;
+ isp->isp_cell[0].resources = &isp->isp_res[0];
+ isp->isp_cell[0].platform_data = isp->isp_pdata;
+ isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);
+ if (r) {
+ DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
+ goto failure;
+ }
+
+ return 0;
+
+failure:
+
+ kfree(isp->isp_pdata);
+ kfree(isp->isp_res);
+ kfree(isp->isp_cell);
+
+ return r;
+}
+
+static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
+{
+ mfd_remove_devices(isp->parent);
+
+ kfree(isp->isp_res);
+ kfree(isp->isp_cell);
+ kfree(isp->isp_pdata);
+
+ return 0;
+}
+
+static const struct isp_funcs isp_v4_1_1_funcs = {
+ .hw_init = isp_v4_1_1_hw_init,
+ .hw_fini = isp_v4_1_1_hw_fini,
+};
+
+void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp)
+{
+ isp->funcs = &isp_v4_1_1_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
new file mode 100644
index 000000000000..dfb9522c9d6a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#ifndef __ISP_V4_1_1_H__
+#define __ISP_V4_1_1_H__
+
+#include "amdgpu_isp.h"
+
+#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
+
+#define MAX_ISP411_INT_SRC 8
+
+void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index 68ef29bc70e2..d694a276498a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -31,6 +31,7 @@
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "jpeg_v5_0_0.h"
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -137,9 +138,9 @@ static int jpeg_v5_0_0_hw_init(void *handle)
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
- WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
- ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
- VCN_JPEG_DB_CTRL__EN_MASK);
+ /* Skip ring test because pause DPG is not implemented. */
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG)
+ return 0;
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -239,7 +240,7 @@ static void jpeg_v5_0_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}
-static int jpeg_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev)
+static int jpeg_v5_0_0_disable_power_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
@@ -252,14 +253,10 @@ static int jpeg_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
- /* keep the JPEG in static PG mode */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
- ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
-
return 0;
}
-static int jpeg_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev)
+static int jpeg_v5_0_0_enable_power_gating(struct amdgpu_device *adev)
{
/* enable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
@@ -277,6 +274,121 @@ static int jpeg_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev)
return 0;
}
+static void jpeg_engine_5_0_0_dpg_clock_gating_mode(struct amdgpu_device *adev,
+ int inst_idx, uint8_t indirect)
+{
+ uint32_t data = 0;
+
+ // JPEG disable CGC
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+ data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+
+ if (indirect) {
+ ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_CGC_CTRL, data, indirect);
+
+ // Turn on All JPEG clocks
+ data = 0;
+ ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_CGC_GATE, data, indirect);
+ } else {
+ WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_CGC_CTRL, data, indirect);
+
+ // Turn on All JPEG clocks
+ data = 0;
+ WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_CGC_GATE, data, indirect);
+ }
+}
+
+/**
+ * jpeg_v5_0_0_start_dpg_mode - Jpeg start with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Start JPEG block with dpg mode
+ */
+static int jpeg_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec;
+ uint32_t reg_data = 0;
+
+ jpeg_v5_0_0_enable_power_gating(adev);
+
+ // enable dynamic power gating mode
+ reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
+ reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
+
+ if (indirect)
+ adev->jpeg.inst[inst_idx].dpg_sram_curr_addr =
+ (uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr;
+
+ jpeg_engine_5_0_0_dpg_clock_gating_mode(adev, inst_idx, indirect);
+
+ /* MJPEG global tiling registers */
+ if (indirect)
+ ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, indirect);
+ else
+ WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 1);
+
+ /* enable System Interrupt for JRBC */
+ if (indirect)
+ ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_SYS_INT_EN,
+ JPEG_SYS_INT_EN__DJRBC0_MASK, indirect);
+ else
+ WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_SYS_INT_EN,
+ JPEG_SYS_INT_EN__DJRBC0_MASK, 1);
+
+ if (indirect) {
+ /* add nop to workaround PSP size check */
+ ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipUVD_NO_OP, 0, indirect);
+
+ amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0);
+ }
+
+ WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_0_stop_dpg_mode - Jpeg stop with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop JPEG block with dpg mode
+ */
+static void jpeg_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t reg_data = 0;
+
+ reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
+ reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
+ WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
+}
+
/**
* jpeg_v5_0_0_start - start JPEG block
*
@@ -292,8 +404,13 @@ static int jpeg_v5_0_0_start(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
+ r = jpeg_v5_0_0_start_dpg_mode(adev, 0, adev->jpeg.indirect_sram);
+ return r;
+ }
+
/* disable power gating */
- r = jpeg_v5_0_0_disable_static_power_gating(adev);
+ r = jpeg_v5_0_0_disable_power_gating(adev);
if (r)
return r;
@@ -304,7 +421,6 @@ static int jpeg_v5_0_0_start(struct amdgpu_device *adev)
WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
-
/* enable JMI channel */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
@@ -314,6 +430,10 @@ static int jpeg_v5_0_0_start(struct amdgpu_device *adev)
JPEG_SYS_INT_EN__DJRBC0_MASK,
~JPEG_SYS_INT_EN__DJRBC0_MASK);
+ WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
+ ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
@@ -340,17 +460,22 @@ static int jpeg_v5_0_0_stop(struct amdgpu_device *adev)
{
int r;
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+ if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
+ jpeg_v5_0_0_stop_dpg_mode(adev, 0);
+ } else {
- jpeg_v5_0_0_enable_clock_gating(adev);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
- /* enable power gating */
- r = jpeg_v5_0_0_enable_static_power_gating(adev);
- if (r)
- return r;
+ jpeg_v5_0_0_enable_clock_gating(adev);
+
+ /* enable power gating */
+ r = jpeg_v5_0_0_enable_power_gating(adev);
+ if (r)
+ return r;
+ }
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, false);
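A condensed view of the start path after this change, assuming only the pg_flags bit decides between the two bring-up sequences (names as in the diff; this is not the full function):

static int jpeg_v5_0_0_start_sketch(struct amdgpu_device *adev)
{
	/* DPG owns the full bring-up, including doorbell and JRBC ring
	 * programming, so the legacy static-PG sequence is skipped entirely.
	 */
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG)
		return jpeg_v5_0_0_start_dpg_mode(adev, 0, adev->jpeg.indirect_sram);

	return jpeg_v5_0_0_disable_power_gating(adev); /* then clock gating, JMI, ring setup */
}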
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h
index bd348336b215..5abb96159814 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h
@@ -24,6 +24,12 @@
#ifndef __JPEG_V5_0_0_H__
#define __JPEG_V5_0_0_H__
+#define vcnipJPEG_CGC_GATE 0x4160
+#define vcnipJPEG_CGC_CTRL 0x4161
+#define vcnipJPEG_SYS_INT_EN 0x4141
+#define vcnipUVD_NO_OP 0x0029
+#define vcnipJPEG_DEC_GFX10_ADDR_CONFIG 0x404A
+
extern const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block;
#endif /* __JPEG_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 7a1ff298417a..8d7267a013d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -566,9 +566,11 @@ static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS);
fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
- /* reset page fault status */
- WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
- regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* clear page fault status and address */
+ WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
+ regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);
+ }
return fed;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 6b71ee85ee65..f5411b798e11 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -93,7 +93,7 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
timeout -= 5;
} while (timeout > 1);
- pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
+ dev_err(adev->dev, "Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
return -ETIME;
}
@@ -111,7 +111,7 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
timeout -= 10;
} while (timeout > 1);
- pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+ dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r);
return -ETIME;
}
@@ -132,7 +132,7 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
trn = xgpu_ai_peek_ack(adev);
if (trn) {
- pr_err("trn=%x ACK should not assert! wait again !\n", trn);
+ dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! wait again !\n", trn);
msleep(1);
}
} while(trn);
@@ -155,7 +155,7 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
/* start to poll ack */
r = xgpu_ai_poll_ack(adev);
if (r)
- pr_err("Doesn't get ack from pf, continue\n");
+ dev_err(adev->dev, "Doesn't get ack from pf, continue\n");
xgpu_ai_mailbox_set_valid(adev, false);
}
@@ -173,7 +173,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
req == IDH_REQ_GPU_RESET_ACCESS) {
r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
if (r) {
- pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
+ dev_err(adev->dev, "Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
return r;
}
/* Retrieve checksum from mailbox2 */
@@ -231,7 +231,7 @@ static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- DRM_DEBUG("get ack intr and do nothing.\n");
+ dev_dbg(adev->dev, "get ack intr and do nothing.\n");
return 0;
}
@@ -258,12 +258,15 @@ static int xgpu_ai_wait_reset(struct amdgpu_device *adev)
{
int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
do {
- if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
+ dev_dbg(adev->dev, "Got AI IDH_FLR_NOTIFICATION_CMPL after %d ms\n", AI_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
return 0;
+ }
msleep(10);
timeout -= 10;
} while (timeout > 1);
- dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n");
+
+ dev_dbg(adev->dev, "waiting AI IDH_FLR_NOTIFICATION_CMPL timeout\n");
return -ETIME;
}
@@ -405,6 +408,13 @@ static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev,
xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
}
+static bool xgpu_ai_rcvd_ras_intr(struct amdgpu_device *adev)
+{
+ enum idh_event msg = xgpu_ai_mailbox_peek_msg(adev);
+
+ return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
@@ -414,4 +424,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.trans_msg = xgpu_ai_mailbox_trans_msg,
.req_init_data = xgpu_ai_request_init_data,
.ras_poison_handler = xgpu_ai_ras_poison_handler,
+ .rcvd_ras_intr = xgpu_ai_rcvd_ras_intr,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index c520b2fabfb9..ed57cbc150af 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -51,7 +51,9 @@ enum idh_event {
IDH_FAIL,
IDH_QUERY_ALIVE,
IDH_REQ_GPU_INIT_DATA_READY,
-
+ IDH_RAS_POISON_READY,
+ IDH_PF_SOFT_FLR_NOTIFICATION,
+ IDH_RAS_ERROR_DETECTED,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 22af30a15a5f..f47bd7ada4d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -91,7 +91,7 @@ static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
timeout -= 5;
} while (timeout > 1);
- pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
+ dev_err(adev->dev, "Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
return -ETIME;
}
@@ -106,13 +106,16 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);
- if (!r)
+ if (!r) {
+ dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
return 0;
+ }
msleep(10);
now = (uint64_t)ktime_to_ms(ktime_get());
} while (timeout > now);
+ dev_dbg(adev->dev, "nv_poll_msg timed out\n");
return -ETIME;
}
@@ -133,11 +136,12 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_nv_mailbox_set_valid(adev, false);
trn = xgpu_nv_peek_ack(adev);
if (trn) {
- pr_err("trn=%x ACK should not assert! wait again !\n", trn);
+ dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! wait again !\n", trn);
msleep(1);
}
} while (trn);
+ dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
@@ -147,7 +151,7 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
/* start to poll ack */
r = xgpu_nv_poll_ack(adev);
if (r)
- pr_err("Doesn't get ack from pf, continue\n");
+ dev_err(adev->dev, "Doesn't get ack from pf, continue\n");
xgpu_nv_mailbox_set_valid(adev, false);
}
@@ -185,7 +189,7 @@ send_request:
goto send_request;
if (req != IDH_REQ_GPU_INIT_DATA) {
- pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+ dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r);
return r;
} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
adev->virt.req_init_data_ver = 0;
@@ -261,7 +265,7 @@ static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- DRM_DEBUG("get ack intr and do nothing.\n");
+ dev_dbg(adev->dev, "get ack intr and do nothing.\n");
return 0;
}
@@ -291,12 +295,15 @@ static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
do {
- if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
+ dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
return 0;
+ }
msleep(10);
timeout -= 10;
} while (timeout > 1);
- dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n");
+
+ dev_dbg(adev->dev, "waiting NV IDH_FLR_NOTIFICATION_CMPL timeout\n");
return -ETIME;
}
@@ -442,6 +449,13 @@ static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
}
}
+static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
+{
+ enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);
+
+ return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -451,4 +465,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.wait_reset = xgpu_nv_wait_reset,
.trans_msg = xgpu_nv_mailbox_trans_msg,
.ras_poison_handler = xgpu_nv_ras_poison_handler,
+ .rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
};
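With the new callback wired into both mailbox backends, core virt code can cheaply ask whether the host flagged a RAS error, treating an all-ones mailbox read (dead register window) the same as an explicit IDH_RAS_ERROR_DETECTED. A sketch of such a caller; the wrapper name is hypothetical:

static bool amdgpu_virt_rcvd_ras_intr_sketch(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	return virt->ops && virt->ops->rcvd_ras_intr &&
	       virt->ops->rcvd_ras_intr(adev);
}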
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 1e8fd90cab43..caf616a2c8a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -26,7 +26,7 @@
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
-#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 10000
#define NV_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
@@ -52,7 +52,8 @@ enum idh_event {
IDH_QUERY_ALIVE,
IDH_REQ_GPU_INIT_DATA_READY,
IDH_RAS_POISON_READY,
-
+ IDH_PF_SOFT_FLR_NOTIFICATION,
+ IDH_RAS_ERROR_DETECTED,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
index 5a20bb229788..39919e0892c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
@@ -345,6 +345,7 @@ static void nbif_v6_3_1_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
uint32_t def, data;
+ u16 devctl2, ltr;
def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
@@ -374,12 +375,17 @@ static void nbif_v6_3_1_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);
- def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
- data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_DEVCTL2, &devctl2);
+ data = def = devctl2;
+ data &= ~PCI_EXP_DEVCTL2_LTR_EN;
if (def != data)
- WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+ pcie_capability_clear_word(adev->pdev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN);
+
+ ltr = pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_LTR);
- WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+ if (ltr)
+ pci_write_config_dword(adev->pdev, ltr + PCI_LTR_MAX_SNOOP_LAT, 0x10011001);
+
#if 0
/* regPSWUSP0_PCIE_LC_CNTL2 should be replace by PCIE_LC_CNTL2 or someone else ? */
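Note on the PCI core helpers used above: pcie_capability_set_word() is a read-modify-write that ORs bits in, while pcie_capability_clear_word() clears them, so disabling LTR needs the clear variant. A minimal equivalent of the DEVCTL2 change, with pdev standing in for adev->pdev:

	u16 devctl2;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2);
	if (devctl2 & PCI_EXP_DEVCTL2_LTR_EN)
		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_LTR_EN);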
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index a15673e2dc99..d27fb4ea6612 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -428,6 +428,7 @@ static int soc24_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
+ AMD_PG_SUPPORT_JPEG_DPG |
AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x50;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 6d6350f220b0..0faa21d8a7b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -557,7 +557,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
if (ret) {
if (ret == -EEXIST)
- con->umc_ecc_log.de_updated = true;
+ con->umc_ecc_log.de_queried_count++;
else
dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);
@@ -566,7 +566,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
return ret;
}
- con->umc_ecc_log.de_updated = true;
+ con->umc_ecc_log.de_queried_count++;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d2fceb6f9802..4f48507418d2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -230,6 +230,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (queue_type < 0) {
dev_err(adev->dev, "Queue type not supported with MES, queue:%d\n",
q->properties.type);
+ up_read(&adev->reset_domain->sem);
return -EINVAL;
}
queue_input.queue_type = (uint32_t)queue_type;
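The shape of the leak being fixed: every early return after the semaphore is taken must be paired with an up_read(), otherwise reset_domain->sem stays held. A sketch, assuming the function enters the section with a trylock as the MES paths elsewhere in the driver do:

	if (!down_read_trylock(&adev->reset_domain->sem))
		return -EIO;

	if (queue_type < 0) {
		up_read(&adev->reset_domain->sem);	/* pair every early return */
		return -EINVAL;
	}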
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c96407379224..d743e2705399 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -368,15 +368,6 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
return false;
}
-static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
- int planes_count)
-{
- int i, j;
-
- for (i = 0, j = planes_count - 1; i < j; i++, j--)
- swap(array_of_surface_update[i], array_of_surface_update[j]);
-}
-
/*
* DC will program planes with their z-order determined by their ordering
* in the dc_surface_updates array. This comparator is used to sort them
@@ -1302,6 +1293,7 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
enum dmub_status status;
bool init;
+ int r;
if (!dmub_srv) {
/* DMUB isn't supported on the ASIC. */
@@ -1319,7 +1311,9 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
- dm_dmub_hw_init(adev);
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
}
}
@@ -11090,8 +11084,12 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
int cursor_scale_w, cursor_scale_h;
int i;
- /* Overlay cursor not supported on HW before DCN */
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0) {
+ /* Overlay cursor not supported on HW before DCN. DCN401 does not have
+ * the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions of
+ * previous DCN generations, so enable native mode on DCN401 as on DCE.
+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 ||
+ amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
*cursor_mode = DM_CURSOR_NATIVE_MODE;
return 0;
}
@@ -11234,7 +11232,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state;
enum dc_status status;
int ret, i;
bool lock_and_validation_needed = false;
@@ -11462,19 +11460,39 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
}
- /* Check cursor planes restrictions */
+ /* Check cursor restrictions */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
enum amdgpu_dm_cursor_mode required_cursor_mode;
+ int is_rotated, is_scaled;
/* Overlay cursor not subject to native cursor restrictions */
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
continue;
+ /* Check if rotation or scaling is enabled on DCN401 */
+ if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) &&
+ amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
+ new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
+
+ is_rotated = new_cursor_state &&
+ ((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0);
+ is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) ||
+ (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h));
+
+ if (is_rotated || is_scaled) {
+ drm_dbg_driver(
+ crtc->dev,
+ "[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
/* If HW can only do native cursor, check restrictions again */
ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
&required_cursor_mode);
-
if (ret) {
drm_dbg_driver(crtc->dev,
"[CRTC:%d:%s] Checking cursor mode failed\n",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index dfcbc1970fe6..5fd1b6b44577 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -989,4 +989,7 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
enum dc_gpu_mem_alloc_type type,
size_t size,
long long *addr);
+
+bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index e16eecb146fd..99014339aaa3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -162,33 +162,63 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
}
}
+bool amdgpu_dm_is_headless(struct amdgpu_device *adev)
+{
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_device *dev;
+ bool is_headless = true;
+
+ if (adev == NULL)
+ return true;
+
+ dev = adev->dm.ddev;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ if (connector->status == connector_status_connected) {
+ is_headless = false;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ return is_headless;
+}
+
static void amdgpu_dm_idle_worker(struct work_struct *work)
{
struct idle_workqueue *idle_work;
idle_work = container_of(work, struct idle_workqueue, work);
idle_work->dm->idle_workqueue->running = true;
- fsleep(HPD_DETECTION_PERIOD_uS);
- mutex_lock(&idle_work->dm->dc_lock);
+
while (idle_work->enable) {
- if (!idle_work->dm->dc->idle_optimizations_allowed)
+ fsleep(HPD_DETECTION_PERIOD_uS);
+ mutex_lock(&idle_work->dm->dc_lock);
+ if (!idle_work->dm->dc->idle_optimizations_allowed) {
+ mutex_unlock(&idle_work->dm->dc_lock);
break;
-
+ }
dc_allow_idle_optimizations(idle_work->dm->dc, false);
mutex_unlock(&idle_work->dm->dc_lock);
fsleep(HPD_DETECTION_TIME_uS);
mutex_lock(&idle_work->dm->dc_lock);
- if (!amdgpu_dm_psr_is_active_allowed(idle_work->dm))
+ if (!amdgpu_dm_is_headless(idle_work->dm->adev) &&
+ !amdgpu_dm_psr_is_active_allowed(idle_work->dm)) {
+ mutex_unlock(&idle_work->dm->dc_lock);
break;
+ }
- dc_allow_idle_optimizations(idle_work->dm->dc, true);
+ if (idle_work->enable)
+ dc_allow_idle_optimizations(idle_work->dm->dc, true);
mutex_unlock(&idle_work->dm->dc_lock);
- fsleep(HPD_DETECTION_PERIOD_uS);
- mutex_lock(&idle_work->dm->dc_lock);
}
- mutex_unlock(&idle_work->dm->dc_lock);
idle_work->dm->idle_workqueue->running = false;
}
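The worker rework above also fixes lock hygiene: every exit from the polling loop now drops dc_lock first, so the worker can never return with the mutex held. The recurring shape, with dm standing in for idle_work->dm:

	mutex_lock(&dm->dc_lock);
	if (stop_condition) {
		mutex_unlock(&dm->dc_lock);	/* never break with dc_lock held */
		break;
	}
	mutex_unlock(&dm->dc_lock);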
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 717d97191dda..62cb59f00929 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3795,6 +3795,7 @@ static int trigger_hpd_mst_set(void *data, u64 val)
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct dc_link *link = NULL;
+ int ret;
if (val == 1) {
drm_connector_list_iter_begin(dev, &iter);
@@ -3806,7 +3807,9 @@ static int trigger_hpd_mst_set(void *data, u64 val)
dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
mutex_unlock(&adev->dm.dc_lock);
- drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0)
+ DRM_ERROR("DM_MST: Failed to set the device into MST mode!\n");
}
}
} else if (val == 0) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index adbf560d6a74..97614947d75b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1239,8 +1239,11 @@ void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
struct amdgpu_device *adev = ctx->driver_context;
- if (adev->dm.idle_workqueue)
+ if (adev->dm.idle_workqueue) {
adev->dm.idle_workqueue->enable = enable;
+ if (enable && !adev->dm.idle_workqueue->running && amdgpu_dm_is_headless(adev))
+ schedule_work(&adev->dm.idle_workqueue->work);
+ }
}
void dm_helpers_dp_mst_update_branch_bandwidth(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 48118447c8d9..5d4f831b1e55 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1691,7 +1691,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (aconnector->mst_output_port->passthrough_aux) {
if (bw_range.min_kbps > end_to_end_bw_in_kbps) {
DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
/*dsc bitstream decoded at the dp last link*/
@@ -1756,7 +1756,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (branch_max_throughput_mps != 0 &&
((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) {
DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails");
- return DC_FAIL_BANDWIDTH_VALIDATE;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config.");
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index cc000833d300..4254bdfefe38 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -227,7 +227,8 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev = 0;
- BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
+ if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev))
+ BREAK_TO_DEBUGGER();
switch (crev) {
case 6:
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index f77840dd051e..7920f6f1aa62 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -113,8 +113,6 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
}
- clk_mgr_helper_get_active_display_cnt(dc, context);
-
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 06f0c41ad6f1..3b10b24f5e23 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -163,9 +163,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
- struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
+ struct clk_limit_num_entries *num_entries_per_clk;
unsigned int i;
+ if (!clk_mgr_base->bw_params)
+ return;
+
+ num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
+
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
@@ -173,9 +178,6 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr->smu_present = false;
clk_mgr->dpm_present = false;
- if (!clk_mgr_base->bw_params)
- return;
-
if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 70f06a7c882e..606b2411eee9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -207,9 +207,14 @@ static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr)
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
+ struct clk_limit_num_entries *num_entries_per_clk;
unsigned int i;
+ if (!clk_mgr_base->bw_params)
+ return;
+
+ num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
+
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
@@ -217,9 +222,6 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr->smu_present = false;
clk_mgr->dpm_present = false;
- if (!clk_mgr_base->bw_params)
- return;
-
if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index a4ba6f99cd34..85a2ef82afa5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1264,6 +1264,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
dc->hwss.post_unlock_program_front_end(dc, dangling_context);
}
+
+ if (dc->res_pool->funcs->prepare_mcache_programming)
+ dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context);
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
dc->hwss.program_front_end_for_ctx(dc, dangling_context);
@@ -2037,6 +2040,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
+ if (dc->res_pool->funcs->prepare_mcache_programming)
+ dc->res_pool->funcs->prepare_mcache_programming(dc, context);
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
@@ -3884,6 +3889,9 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
+ if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
+ dc->res_pool->funcs->prepare_mcache_programming(dc, context);
+
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
@@ -3903,6 +3911,10 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
+ if (dc->hwss.wait_for_dcc_meta_propagation) {
+ dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
+ }
+
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 5037474bf95c..87e36d51c56d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -595,6 +595,12 @@ void hwss_build_fast_sequence(struct dc *dc,
if (!plane || !stream)
return;
+ if (dc->hwss.wait_for_dcc_meta_propagation) {
+ block_sequence[*num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc;
+ block_sequence[*num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = pipe_ctx;
+ block_sequence[*num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP;
+ (*num_steps)++;
+ }
if (dc->hwss.subvp_pipe_control_lock_fast) {
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true;
@@ -835,6 +841,11 @@ void hwss_execute_sequence(struct dc *dc,
case DMUB_SUBVP_SAVE_SURF_ADDR:
hwss_subvp_save_surf_addr(params);
break;
+ case HUBP_WAIT_FOR_DCC_META_PROP:
+ dc->hwss.wait_for_dcc_meta_propagation(
+ params->wait_for_dcc_meta_propagation_params.dc,
+ params->wait_for_dcc_meta_propagation_params.top_pipe_to_program);
+ break;
case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST:
dc->hwss.fams2_global_control_lock_fast(params);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 9b24f448ce50..de0633f98158 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -416,6 +416,35 @@ bool dc_stream_program_cursor_position(
if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
dc_allow_idle_optimizations(dc, true);
+ /* apply/update visual confirm */
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) {
+ /* update software state */
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* adjust visual confirm color for all pipes with current stream */
+ if (stream == pipe_ctx->stream) {
+ if (stream->cursor_position.enable) {
+ pipe_ctx->visual_confirm_color.color_r_cr = color_value;
+ pipe_ctx->visual_confirm_color.color_g_y = 0;
+ pipe_ctx->visual_confirm_color.color_b_cb = 0;
+ } else {
+ pipe_ctx->visual_confirm_color.color_r_cr = 0;
+ pipe_ctx->visual_confirm_color.color_g_y = 0;
+ pipe_ctx->visual_confirm_color.color_b_cb = color_value;
+ }
+
+ /* programming hardware */
+ if (pipe_ctx->plane_state)
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
+ pipe_ctx->plane_res.hubp->mpcc_id);
+ }
+ }
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index d0d1af451b64..900892855436 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.289"
+#define DC_VER "3.2.290"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -333,6 +333,9 @@ struct dc_dcc_setting {
uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
+ uint32_t dcc_256_256 : 1; //available in ASICs starting with DCN 4.0x (the best compression case)
+ uint32_t dcc_256_128 : 1; //available in ASICs starting with DCN 4.0x
+ uint32_t dcc_256_64 : 1; //available in ASICs starting with DCN 4.0x (the worst compression case)
} dcc_controls;
};
@@ -476,6 +479,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
VISUAL_CONFIRM_FAMS2 = 19,
+ VISUAL_CONFIRM_HW_CURSOR = 20,
};
enum dc_psr_power_opts {
@@ -1036,9 +1040,10 @@ struct dc_debug_options {
unsigned int static_screen_wait_frames;
uint32_t pwm_freq;
bool force_chroma_subsampling_1tap;
+ unsigned int dcc_meta_propagation_delay_us;
bool disable_422_left_edge_pixel;
bool dml21_force_pstate_method;
- uint32_t dml21_force_pstate_method_value;
+ uint32_t dml21_force_pstate_method_values[MAX_PIPES];
uint32_t dml21_disable_pstate_method_mask;
union dmub_fams2_global_feature_config fams2_config;
bool enable_legacy_clock_update;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index b2051c4dd7b6..ded13026c8ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -245,7 +245,9 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
- dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ if (status != DMUB_STATUS_OK)
+ return false;
/* Requeue the command. */
status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
@@ -517,7 +519,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
union dmub_rb_cmd cmd = { 0 };
unsigned int panel_inst = 0;
- dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);
+ if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst))
+ return;
memset(&cmd, 0, sizeof(cmd));
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 21f4af9ab096..c550e8997033 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1036,6 +1036,7 @@ enum replay_FW_Message_type {
Replay_Set_Residency_Frameupdate_Timer,
Replay_Set_Pseudo_VTotal,
Replay_Disabled_Adaptive_Sync_SDP,
+ Replay_Set_General_Cmd,
};
union replay_error_status {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 6a467c49b4a4..07f1f396ba52 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -853,43 +853,30 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
}
/*get other front end connected to this backend*/
-static uint8_t dccg401_get_other_enabled_symclk_fe(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+static uint8_t dccg401_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst)
{
uint8_t num_enabled_symclk_fe = 0;
- uint32_t be_clk_en = 0, fe_clk_en[4] = {0}, be_clk_sel[4] = {0};
+ uint32_t fe_clk_en[4] = {0}, be_clk_sel[4] = {0};
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint8_t i;
- switch (link_enc_inst) {
- case 0:
- REG_GET_3(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, &be_clk_en,
- SYMCLKA_FE_EN, &fe_clk_en[0],
- SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]);
- break;
- case 1:
- REG_GET_3(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, &be_clk_en,
- SYMCLKB_FE_EN, &fe_clk_en[1],
- SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]);
- break;
- case 2:
- REG_GET_3(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, &be_clk_en,
- SYMCLKC_FE_EN, &fe_clk_en[2],
- SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]);
- break;
- case 3:
- REG_GET_3(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, &be_clk_en,
- SYMCLKD_FE_EN, &fe_clk_en[3],
- SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]);
- break;
- }
- if (be_clk_en) {
- /* for DPMST, this backend could be used by multiple front end.
- only disable the backend if this stream_enc_ins is the last active stream enc connected to this back_end*/
- uint8_t i;
- for (i = 0; i != link_enc_inst && i < ARRAY_SIZE(fe_clk_en); i++) {
- if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
- num_enabled_symclk_fe++;
- }
+ REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0],
+ SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]);
+
+ REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1],
+ SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]);
+
+ REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2],
+ SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]);
+
+ REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3],
+ SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]);
+
+ for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) {
+ if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
+ num_enabled_symclk_fe++;
}
+
return num_enabled_symclk_fe;
}
@@ -921,9 +908,9 @@ static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_ins
break;
}
- /*check other enabled symclk fe */
- num_enabled_symclk_fe = dccg401_get_other_enabled_symclk_fe(dccg, stream_enc_inst, link_enc_inst);
- /*only turn off backend clk if other front end attachecd to this backend are all off,
+ /* check other enabled symclk fe connected to this be */
+ num_enabled_symclk_fe = dccg401_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst);
+ /* only turn off backend clk if other front ends attached to this backend are all off,
for mst, only turn off the backend if this is the last front end*/
if (num_enabled_symclk_fe == 0) {
switch (link_enc_inst) {
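
The refactor above replaces a per-backend switch, which only ever populated one slot of the fe_clk_en/be_clk_sel arrays, with four unconditional reads followed by a single counting loop; it also drops the stale stream_enc_inst parameter and fixes the old loop, which stopped scanning at i == link_enc_inst and so never counted front ends at higher indices. A sketch of the resulting logic, with made-up register values:

#include <stdio.h>

#define NUM_FE 4

/* count front ends that are enabled and select this back end */
static unsigned int count_fe_on_be(const unsigned int fe_en[NUM_FE],
				   const unsigned int be_sel[NUM_FE],
				   unsigned int link_enc_inst)
{
	unsigned int n = 0;

	for (unsigned int i = 0; i < NUM_FE; i++)
		if (fe_en[i] && be_sel[i] == link_enc_inst)
			n++;
	return n;
}

int main(void)
{
	unsigned int en[NUM_FE]  = { 1, 1, 0, 1 };
	unsigned int sel[NUM_FE] = { 0, 2, 2, 2 };

	/* back end 2 still has two active front ends: keep its clock on */
	printf("%u\n", count_fe_on_be(en, sel, 2));
	return 0;
}
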
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 136bd93c3b65..4a9d07c31bc5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -1361,7 +1361,10 @@ void dce110_link_encoder_dp_set_lane_settings(
cntl.lane_settings = training_lane_set.raw;
/* call VBIOS table to set voltage swing and pre-emphasis */
- link_transmitter_control(enc110, &cntl);
+ if (link_transmitter_control(enc110, &cntl) != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__);
+ BREAK_TO_DEBUGGER();
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 5437ebd8bc21..2a21bcf5224f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -346,13 +346,29 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp);
//Cmd Body
cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst =
cmd_element->disabled_adaptive_sync_sdp_data.panel_inst;
cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled =
cmd_element->disabled_adaptive_sync_sdp_data.force_disabled;
break;
+ case Replay_Set_General_Cmd:
+ //Header
+ cmd.replay_set_general_cmd.header.sub_type =
+ DMUB_CMD__REPLAY_SET_GENERAL_CMD;
+ cmd.replay_set_general_cmd.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_general_cmd);
+ //Cmd Body
+ cmd.replay_set_general_cmd.data.panel_inst =
+ cmd_element->set_general_cmd_data.panel_inst;
+ cmd.replay_set_general_cmd.data.subtype =
+ cmd_element->set_general_cmd_data.subtype;
+ cmd.replay_set_general_cmd.data.param1 =
+ cmd_element->set_general_cmd_data.param1;
+ cmd.replay_set_general_cmd.data.param2 =
+ cmd_element->set_general_cmd_data.param2;
+ break;
case Replay_Msg_Not_Support:
default:
return;
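
The payload_bytes fix above corrects a copy-paste of sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) into the adaptive-sync-SDP case, and the new Replay_Set_General_Cmd case sizes its own struct. One way to make that class of slip harder, sketched with stand-in types (not the real DMUB definitions), is to take sizeof of the object being filled:

#include <stdint.h>
#include <stdio.h>

struct demo_hdr { uint32_t payload_bytes; };
struct demo_sdp_cmd { struct demo_hdr header; uint32_t panel_inst; };

int main(void)
{
	struct demo_sdp_cmd cmd = { 0 };

	/* sizeof on the variable cannot silently point at the wrong struct */
	cmd.header.payload_bytes = sizeof(cmd);
	printf("%u\n", (unsigned)cmd.header.payload_bytes);
	return 0;
}
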
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 4d0eed7598b2..e0558a78b11c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1104,6 +1104,7 @@ void dcn10_link_encoder_dp_set_lane_settings(
union dpcd_training_lane_set training_lane_set = { { 0 } };
int32_t lane = 0;
struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
if (!link_settings) {
BREAK_TO_DEBUGGER();
@@ -1138,7 +1139,12 @@ void dcn10_link_encoder_dp_set_lane_settings(
cntl.lane_settings = training_lane_set.raw;
/* call VBIOS table to set voltage swing and pre-emphasis */
- link_transmitter_control(enc10, &cntl);
+ result = link_transmitter_control(enc10, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", __func__);
+ BREAK_TO_DEBUGGER();
+ }
}
}
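
Both this hunk and the dce110 one above stop ignoring the enum bp_result returned by link_transmitter_control(), logging and trapping on failure instead. A compact sketch of the pattern, with demo_* stand-ins for the BIOS-parser types:

#include <stdio.h>

enum demo_bp_result { DEMO_BP_OK, DEMO_BP_ERROR };

/* stand-in for link_transmitter_control(); fails on a bad setting */
static enum demo_bp_result demo_transmitter_control(int lane_settings)
{
	return lane_settings < 0 ? DEMO_BP_ERROR : DEMO_BP_OK;
}

static void demo_set_lane_settings(int lane_settings)
{
	if (demo_transmitter_control(lane_settings) != DEMO_BP_OK)
		fprintf(stderr, "%s: Failed to execute VBIOS command table!\n",
			__func__);
}

int main(void)
{
	demo_set_lane_settings(-1);
	return 0;
}
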
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index fadf5e872e38..dc37dbf870df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -1,12 +1,5 @@
-#
-# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
-#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
-#
-# Authors: AMD
+# SPDX-License-Identifier: MIT
+# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved.
#
# Makefile for dcn30.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index 62c8ab0e45aa..d9816313c3b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -1,12 +1,6 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved.
#
-# Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
-#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
-#
-# Authors: AMD
#
# Makefile for dcn31.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index cac756c75b7f..15fdcf7c6466 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -1,12 +1,5 @@
-#
-# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved
-#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
-#
-# Authors: AMD
+# SPDX-License-Identifier: MIT
+# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved.
#
# Makefile for dcn314.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/Makefile b/drivers/gpu/drm/amd/display/dc/dcn401/Makefile
index 2989e706bccf..ded1f3140beb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn401/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn401/Makefile
@@ -1,6 +1,5 @@
-#
-# Copyright © 2023 Advanced Micro Devices, Inc. All rights reserved.
-#
+# SPDX-License-Identifier: MIT
+# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved.
DCN401 += dcn401_dio_link_encoder.o
DCN401 += dcn401_dio_stream_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 54dd7e164635..8a8efe408a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1084,7 +1084,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
struct dc_stream_status *stream_status = &context->stream_status[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
- bool is_pwrseq0 = link->link_index == 0;
+ bool is_pwrseq0 = (link && link->link_index == 0);
bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
bool is_replay = link && link->replay_settings.replay_feature_enabled;
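
The one-line change above matters because link can be NULL here; the neighbouring is_psr and is_replay expressions already guard with "link &&", and is_pwrseq0 now does the same. A trivial sketch of the short-circuit:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_link { unsigned int link_index; };

int main(void)
{
	struct demo_link *link = NULL;

	/* the && makes the dereference conditional: a NULL link yields
	 * false rather than a crash */
	bool is_pwrseq0 = (link && link->link_index == 0);

	printf("%d\n", (int)is_pwrseq0);
	return 0;
}
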
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 07146569e335..7c56ad0f8812 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -688,12 +688,11 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
const display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
- bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
+ unsigned int ppe = 1;
// TODO check if ppe applies for both luma and chroma in the 422 case
if (is_chroma) {
@@ -825,7 +824,6 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double min_dst_y_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
- bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
@@ -971,7 +969,6 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1148,13 +1145,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
- if (mode_422) {
- swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
- swath_width_pixels_ub_c = swath_width_ub_c * 2;
- } else {
- swath_width_pixels_ub_l = swath_width_ub_l * 1;
- swath_width_pixels_ub_c = swath_width_ub_c * 1;
- }
+ swath_width_pixels_ub_l = swath_width_ub_l;
+ swath_width_pixels_ub_c = swath_width_ub_c;
if (htaps_l <= 1)
min_hratio_fact_l = 2.0;
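
This hunk, and the five identical ones that follow for the dcn20v2/dcn21/dcn30/dcn31/dcn314 variants, delete dead code: mode_422 was assigned false and never changed, so ppe was always 1 and the *2 swath-width branch was unreachable. The fold, in miniature:

#include <stdio.h>

int main(void)
{
	unsigned int swath_width_ub = 128;
	unsigned int swath_width_pixels_ub;

	/* before: if (mode_422) swath_width_pixels_ub = swath_width_ub * 2;
	 * with mode_422 pinned to false, only the plain copy survives */
	swath_width_pixels_ub = swath_width_ub;

	printf("%u\n", swath_width_pixels_ub);
	return 0;
}
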
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index f4bba1f2aeb6..3d95bfa5aca2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -688,12 +688,11 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
const display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
- bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
+ unsigned int ppe = 1;
// TODO check if ppe applies for both luma and chroma in the 422 case
if (is_chroma) {
@@ -825,7 +824,6 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double min_dst_y_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
- bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
@@ -972,7 +970,6 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1149,13 +1146,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
- if (mode_422) {
- swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
- swath_width_pixels_ub_c = swath_width_ub_c * 2;
- } else {
- swath_width_pixels_ub_l = swath_width_ub_l * 1;
- swath_width_pixels_ub_c = swath_width_ub_c * 1;
- }
+ swath_width_pixels_ub_l = swath_width_ub_l;
+ swath_width_pixels_ub_c = swath_width_ub_c;
if (htaps_l <= 1)
min_hratio_fact_l = 2.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index c229a9edf82a..98502a4f0567 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -694,12 +694,11 @@ static void get_surf_rq_param(
const display_pipe_params_st *pipe_param,
bool is_chroma)
{
- bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
+ unsigned int ppe = 1;
// FIXME check if ppe applies for both luma and chroma in the 422 case
if (is_chroma) {
@@ -868,7 +867,6 @@ static void dml_rq_dlg_get_dlg_params(
double min_dst_y_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
- bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
@@ -1020,7 +1018,6 @@ static void dml_rq_dlg_get_dlg_params(
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
- mode_422 = false; // FIXME
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1197,13 +1194,8 @@ static void dml_rq_dlg_get_dlg_params(
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
- if (mode_422) {
- swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
- swath_width_pixels_ub_c = swath_width_ub_c * 2;
- } else {
- swath_width_pixels_ub_l = swath_width_ub_l * 1;
- swath_width_pixels_ub_c = swath_width_ub_c * 1;
- }
+ swath_width_pixels_ub_l = swath_width_ub_l;
+ swath_width_pixels_ub_c = swath_width_ub_c;
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index f3ee7baac786..b28fcc8608ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -660,13 +660,12 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
bool is_chroma,
bool is_alpha)
{
- bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int surface_height = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
+ unsigned int ppe = 1;
// FIXME check if ppe applies for both luma and chroma in the 422 case
if (is_chroma | is_alpha) {
@@ -934,7 +933,6 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double min_dst_y_ttu_vblank = 0;
unsigned int dlg_vblank_start = 0;
bool dual_plane = false;
- bool mode_422 = false;
unsigned int access_dir = 0;
unsigned int vp_height_l = 0;
unsigned int vp_width_l = 0;
@@ -1083,7 +1081,6 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
@@ -1301,13 +1298,8 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
- if (mode_422) {
- swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
- swath_width_pixels_ub_c = swath_width_ub_c * 2;
- } else {
- swath_width_pixels_ub_l = swath_width_ub_l * 1;
- swath_width_pixels_ub_c = swath_width_ub_c * 1;
- }
+ swath_width_pixels_ub_l = swath_width_ub_l;
+ swath_width_pixels_ub_c = swath_width_ub_c;
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index b6d954d9aa00..b57b095cd4a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -655,13 +655,12 @@ static void get_surf_rq_param(
bool is_chroma,
bool is_alpha)
{
- bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int surface_height = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
+ unsigned int ppe = 1;
// FIXME check if ppe applies for both luma and chroma in the 422 case
if (is_chroma | is_alpha) {
@@ -888,7 +887,6 @@ static void dml_rq_dlg_get_dlg_params(
double min_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
- bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
@@ -1004,7 +1002,6 @@ static void dml_rq_dlg_get_dlg_params(
// Prefetch Calc
// Source
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
- mode_422 = 0;
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
@@ -1142,13 +1139,8 @@ static void dml_rq_dlg_get_dlg_params(
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
- if (mode_422) {
- swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
- swath_width_pixels_ub_c = swath_width_ub_c * 2;
- } else {
- swath_width_pixels_ub_l = swath_width_ub_l * 1;
- swath_width_pixels_ub_c = swath_width_ub_c * 1;
- }
+ swath_width_pixels_ub_l = swath_width_ub_l;
+ swath_width_pixels_ub_c = swath_width_ub_c;
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index 94975b0fa398..61b3bebf24c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -743,13 +743,12 @@ static void get_surf_rq_param(
bool is_chroma,
bool is_alpha)
{
- bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
unsigned int surface_height = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
+ unsigned int ppe = 1;
// FIXME check if ppe applies for both luma and chroma in the 422 case
if (is_chroma | is_alpha) {
@@ -973,7 +972,6 @@ static void dml_rq_dlg_get_dlg_params(
double min_ttu_vblank;
unsigned int dlg_vblank_start;
bool dual_plane;
- bool mode_422;
unsigned int access_dir;
unsigned int vp_height_l;
unsigned int vp_width_l;
@@ -1091,7 +1089,6 @@ static void dml_rq_dlg_get_dlg_params(
// Prefetch Calc
// Source
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
- mode_422 = 0;
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
@@ -1230,13 +1227,8 @@ static void dml_rq_dlg_get_dlg_params(
dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
- if (mode_422) {
- swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
- swath_width_pixels_ub_c = swath_width_ub_c * 2;
- } else {
- swath_width_pixels_ub_l = swath_width_ub_l * 1;
- swath_width_pixels_ub_c = swath_width_ub_c * 1;
- }
+ swath_width_pixels_ub_l = swath_width_ub_l;
+ swath_width_pixels_ub_c = swath_width_ub_c;
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index d5ead0205053..06387b8b0aee 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -1000,7 +1000,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
/* apply forced pstate policy */
if (dml_ctx->config.pmo.force_pstate_method_enable) {
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].overrides.uclk_pstate_change_strategy =
- dml21_force_pstate_method_to_uclk_state_change_strategy(dml_ctx->config.pmo.force_pstate_method_value);
+ dml21_force_pstate_method_to_uclk_state_change_strategy(dml_ctx->config.pmo.force_pstate_method_values[stream_index]);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index b442e1f9f204..c310354cd5fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -47,7 +47,8 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex
/* UCLK P-State options */
if (in_dc->debug.dml21_force_pstate_method) {
dml_ctx->config.pmo.force_pstate_method_enable = true;
- dml_ctx->config.pmo.force_pstate_method_value = in_dc->debug.dml21_force_pstate_method_value;
+ for (int i = 0; i < MAX_PIPES; i++)
+ dml_ctx->config.pmo.force_pstate_method_values[i] = in_dc->debug.dml21_force_pstate_method_values[i];
} else {
dml_ctx->config.pmo.force_pstate_method_enable = false;
}
@@ -257,6 +258,7 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
mode_support->dml2_instance = dml_init->dml2_instance;
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
+ dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
is_supported = dml2_check_mode_supported(mode_support);
if (!is_supported)
return false;
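
Across dc.h, dml21_translation_helper.c, this file, and dml2_wrapper.h, the forced-pstate debug knob widens from one scalar to a MAX_PIPES-sized array, so each stream can pin its own method; the wrapper copies the debug array element by element. A sketch of the widening, with illustrative names and a made-up MAX_PIPES:

#include <stdio.h>

#define MAX_PIPES 6

enum demo_pstate_method { DEMO_AUTO, DEMO_VACTIVE, DEMO_SVP };

int main(void)
{
	/* per-pipe debug overrides; unlisted entries default to DEMO_AUTO */
	enum demo_pstate_method debug_values[MAX_PIPES] = { DEMO_VACTIVE, DEMO_SVP };
	enum demo_pstate_method pmo_values[MAX_PIPES];

	/* element-by-element copy, as in dml21_apply_debug_options() */
	for (int i = 0; i < MAX_PIPES; i++)
		pmo_values[i] = debug_values[i];

	/* each stream then indexes its own entry by stream_index */
	printf("%d\n", (int)pmo_values[1]);
	return 0;
}
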
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index be73784e21eb..6f4026e396e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -11521,8 +11521,8 @@ void dml2_core_calcs_get_dpte_row_height(
unsigned int MacroTileWidthC;
unsigned int MacroTileHeightY;
unsigned int MacroTileHeightC;
- bool surf_linear_128_l;
- bool surf_linear_128_c;
+ bool surf_linear_128_l = false;
+ bool surf_linear_128_c = false;
CalculateBytePerPixelAndBlockSizes(
SourcePixelFormat,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
index 0099e58e0b1a..81f0a6f19f87 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
@@ -3142,62 +3142,62 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode
{
switch (sw_mode) {
case (dml2_sw_linear):
- return 256; break;
+ return 256;
case (dml2_sw_256b_2d):
- return 256; break;
+ return 256;
case (dml2_sw_4kb_2d):
- return 4096; break;
+ return 4096;
case (dml2_sw_64kb_2d):
- return 65536; break;
+ return 65536;
case (dml2_sw_256kb_2d):
- return 262144; break;
+ return 262144;
case (dml2_gfx11_sw_linear):
- return 256; break;
+ return 256;
case (dml2_gfx11_sw_64kb_d):
- return 65536; break;
+ return 65536;
case (dml2_gfx11_sw_64kb_d_t):
- return 65536; break;
+ return 65536;
case (dml2_gfx11_sw_64kb_d_x):
- return 65536; break;
+ return 65536;
case (dml2_gfx11_sw_64kb_r_x):
- return 65536; break;
+ return 65536;
case (dml2_gfx11_sw_256kb_d_x):
- return 262144; break;
+ return 262144;
case (dml2_gfx11_sw_256kb_r_x):
- return 262144; break;
+ return 262144;
default:
DML2_ASSERT(0);
return 256;
- };
+ }
}
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
{
switch (bw_type) {
case (dml2_core_internal_bw_sdp):
- return("dml2_core_internal_bw_sdp"); break;
+ return("dml2_core_internal_bw_sdp");
case (dml2_core_internal_bw_dram):
- return("dml2_core_internal_bw_dram"); break;
+ return("dml2_core_internal_bw_dram");
case (dml2_core_internal_bw_max):
- return("dml2_core_internal_bw_max"); break;
+ return("dml2_core_internal_bw_max");
default:
- return("dml2_core_internal_bw_unknown"); break;
- };
+ return("dml2_core_internal_bw_unknown");
+ }
}
const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
{
switch (dml2_core_internal_soc_state_type) {
case (dml2_core_internal_soc_state_sys_idle):
- return("dml2_core_internal_soc_state_sys_idle"); break;
+ return("dml2_core_internal_soc_state_sys_idle");
case (dml2_core_internal_soc_state_sys_active):
- return("dml2_core_internal_soc_state_sys_active"); break;
+ return("dml2_core_internal_soc_state_sys_active");
case (dml2_core_internal_soc_state_svp_prefetch):
- return("dml2_core_internal_soc_state_svp_prefetch"); break;
+ return("dml2_core_internal_soc_state_svp_prefetch");
case dml2_core_internal_soc_state_max:
default:
- return("dml2_core_internal_soc_state_unknown"); break;
- };
+ return("dml2_core_internal_soc_state_unknown");
+ }
}
static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan)
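
The dml2_core_shared.c cleanup above is purely syntactic: a break after return is unreachable, and the ';' after a switch's closing brace is a stray empty statement some compilers warn about. The idiomatic shape:

#include <stdio.h>

static unsigned int tile_bytes(int sw_mode)
{
	switch (sw_mode) {
	case 0:
		return 256;	/* no break needed after return */
	case 1:
		return 4096;
	default:
		return 256;
	}	/* and no ';' here */
}

int main(void)
{
	printf("%u\n", tile_bytes(1));
	return 0;
}
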
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 00fedc00a735..603036df68ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -1163,7 +1163,6 @@ static bool is_config_schedulable(
schedulable = true;
/* sort disallow times from greatest to least */
- unsigned int temp;
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
bool swapped = false;
@@ -1172,9 +1171,8 @@ static bool is_config_schedulable(
double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
- temp = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
- s->pmo_dcn4.sorted_group_gtl_disallow_index[j] = s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1];
- s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1] = temp;
+ swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
+ s->pmo_dcn4.sorted_group_gtl_disallow_index[j+1]);
swapped = true;
}
}
@@ -1232,9 +1230,8 @@ static bool is_config_schedulable(
double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
if (j_period_us < jp1_period_us) {
/* swap as A < B */
- temp = s->pmo_dcn4.sorted_group_gtl_period_index[j];
- s->pmo_dcn4.sorted_group_gtl_period_index[j] = s->pmo_dcn4.sorted_group_gtl_period_index[j + 1];
- s->pmo_dcn4.sorted_group_gtl_period_index[j + 1] = temp;
+ swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
+ s->pmo_dcn4.sorted_group_gtl_period_index[j+1]);
swapped = true;
}
}
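
Both sort passes above exchange array elements; the open-coded three-line swap through temp becomes the kernel's swap() helper. A user-space sketch of one bubble pass over a greatest-first index sort, with swap() redefined locally (the kernel supplies its own):

#include <stdio.h>

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	unsigned int idx[4] = { 2, 0, 3, 1 };
	double key[4] = { 5.0, 9.0, 1.0, 7.0 };

	/* one pass: push the smallest key toward the end, as in
	 * is_config_schedulable()'s disallow-time sort */
	for (int j = 0; j < 3; j++)
		if (key[idx[j]] < key[idx[j + 1]])
			swap(idx[j], idx[j + 1]);

	for (int j = 0; j < 4; j++)
		printf("%u ", idx[j]);
	printf("\n");
	return 0;
}
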
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
index 6f334fdc6eb8..2fb3e2f45e07 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
@@ -96,10 +96,15 @@ bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
{
struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
+ /* Borrow the build_mode_programming_locals programming struct for the DPMM call. */
+ struct dml2_display_cfg_programming *dpmm_programming = dml->scratch.build_mode_programming_locals.mode_programming_params.programming;
bool result = false;
bool mcache_success = false;
+ if (dpmm_programming)
+ memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
+
setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
l->mode_support_params.instance = &dml->core_instance;
@@ -122,6 +127,18 @@ bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &mcache_phase);
}
+ /*
+ * Call DPMM to map all requirements to the minimum clock state
+ */
+ if (result && dpmm_programming) {
+ l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
+ l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_mode_params.programming = dpmm_programming;
+ l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
+ l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
+ result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
+ }
+
in_out->is_supported = mcache_success;
result = result && in_out->is_supported;
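
The dml_top.c hunk above borrows the build-mode-programming scratch struct (wired up by the dml21_wrapper.c change earlier), zeroes it before use, and only runs the DPMM minimum-clock mapping when the mode is supported and the struct is actually present. The control flow, reduced to stand-ins:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct demo_programming { unsigned int min_clk_khz; };

/* stand-in for dpmm_instance.map_mode_to_soc_dpm() */
static bool demo_map_to_min_clocks(struct demo_programming *p)
{
	p->min_clk_khz = 400000;	/* illustrative result */
	return true;
}

static bool demo_check_mode(struct demo_programming *borrowed, bool supported)
{
	bool result = supported;

	if (borrowed)	/* clear stale contents up front */
		memset(borrowed, 0, sizeof(*borrowed));

	if (result && borrowed)
		result = demo_map_to_min_clocks(borrowed);

	return result;
}

int main(void)
{
	struct demo_programming prog;

	printf("%d %u\n", (int)demo_check_mode(&prog, true), prog.min_clk_khz);
	return 0;
}
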
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index dd90c5df5a5a..5632cdacb7f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -870,6 +870,7 @@ struct dml2_check_mode_supported_locals {
struct dml2_optimization_phase_locals optimization_phase_locals;
struct display_configuation_with_meta base_display_config_with_meta;
struct display_configuation_with_meta optimized_display_config_with_meta;
+ struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out dppm_map_mode_params;
};
struct optimization_init_function_params {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 282d70e2b18a..3d29169dd6bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -750,6 +750,8 @@ static void enable_phantom_plane(struct dml2_context *ctx,
ctx->config.svp_pstate.callbacks.dc,
state,
curr_pipe->plane_state);
+ if (!phantom_plane)
+ return;
}
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
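
The two added lines above guard the allocation: when creating the phantom plane fails, enable_phantom_plane() now returns instead of falling through to the memcpy of a NULL pointer. In miniature:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_plane { unsigned long address; };

/* stand-in for the phantom-plane allocator; can fail */
static struct demo_plane *demo_create_plane(int fail)
{
	return fail ? NULL : calloc(1, sizeof(struct demo_plane));
}

static void demo_enable_phantom(const struct demo_plane *curr, int fail)
{
	struct demo_plane *phantom = demo_create_plane(fail);

	if (!phantom)
		return;	/* the added early return */

	memcpy(&phantom->address, &curr->address, sizeof(phantom->address));
	free(phantom);
}

int main(void)
{
	struct demo_plane curr = { 0xdead };

	demo_enable_phantom(&curr, 1);	/* allocation failure: no crash */
	return 0;
}
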
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index f6310408dbba..c58235121474 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -442,7 +442,6 @@ static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct d
bool result = false;
int drr_display_index = 0, non_svp_streams = 0;
bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
- bool advanced_pstate_switching = false;
display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
@@ -451,8 +450,7 @@ static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct d
if (!result) {
pstate_optimization_done = true;
- } else if (!advanced_pstate_switching ||
- (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp)) {
+ } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) {
pstate_optimization_success = true;
pstate_optimization_done = true;
}
@@ -628,6 +626,21 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
if (result) {
unsigned int lowest_state_idx = s->mode_support_params.out_lowest_state_idx;
+ double min_fclk_mhz_for_urgent_workaround = (double)dml2->config.min_fclk_for_urgent_workaround_khz / 1000.0;
+ double max_frac_urgent = (double)dml2->config.max_frac_urgent_for_min_fclk_x1000 / 1000.0;
+
+ if (min_fclk_mhz_for_urgent_workaround > 0.0 && max_frac_urgent > 0.0 &&
+ (dml2->v20.dml_core_ctx.mp.FractionOfUrgentBandwidth > max_frac_urgent ||
+ dml2->v20.dml_core_ctx.mp.FractionOfUrgentBandwidthImmediateFlip > max_frac_urgent)) {
+ unsigned int forced_lowest_state_idx = lowest_state_idx;
+
+ while (forced_lowest_state_idx < dml2->v20.dml_core_ctx.states.num_states &&
+ dml2->v20.dml_core_ctx.states.state_array[forced_lowest_state_idx].fabricclk_mhz <= min_fclk_mhz_for_urgent_workaround) {
+ forced_lowest_state_idx += 1;
+ }
+ lowest_state_idx = forced_lowest_state_idx;
+ }
+
out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.mp.Dispclk_calculated * 1000;
out_clks.p_state_supported = s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported;
if (in_dc->config.use_default_clock_table &&
@@ -672,12 +685,14 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
static bool dml2_validate_only(struct dc_state *context)
{
- struct dml2_context *dml2 = context->bw_ctx.dml2;
+ struct dml2_context *dml2;
unsigned int result = 0;
if (!context || context->stream_count == 0)
return true;
+ dml2 = context->bw_ctx.dml2;
+
/* Zero out before each call before proceeding */
memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
@@ -806,6 +821,12 @@ void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
}
+void dml2_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2)
+{
+ if (dml2->architecture == dml2_architecture_21)
+ dml21_prepare_mcache_programming(in_dc, context, dml2);
+}
+
void dml2_copy(struct dml2_context *dst_dml2,
struct dml2_context *src_dml2)
{
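
The workaround added to dml2_validate_and_build_resource() kicks in when the measured fraction of urgent bandwidth exceeds max_frac_urgent: it walks the lowest allowed DPM state upward past every state whose fabric clock sits at or below min_fclk_for_urgent_workaround. The walk, with an illustrative state table:

#include <stdio.h>

int main(void)
{
	double fabricclk_mhz[5] = { 400.0, 600.0, 800.0, 1000.0, 1200.0 };
	unsigned int num_states = 5, lowest = 0;
	double min_fclk_mhz = 600.0;	/* workaround floor, from khz / 1000 */

	/* skip states at or below the floor */
	while (lowest < num_states && fabricclk_mhz[lowest] <= min_fclk_mhz)
		lowest++;

	printf("forced lowest state: %u\n", lowest);	/* 2 -> 800 MHz */
	return 0;
}
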
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 20b3970c0857..6e3d52eb45c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -230,13 +230,15 @@ struct dml2_configuration_options {
struct socbb_ip_params_external *external_socbb_ip_params;
struct {
bool force_pstate_method_enable;
- enum dml2_force_pstate_methods force_pstate_method_value;
+ enum dml2_force_pstate_methods force_pstate_method_values[MAX_PIPES];
} pmo;
bool map_dc_pipes_with_callbacks;
bool use_clock_dc_limits;
bool gpuvm_enable;
struct dml2_soc_bb *bb_from_dmub;
+ int max_frac_urgent_for_min_fclk_x1000;
+ int min_fclk_for_urgent_workaround_khz;
};
/*
@@ -301,5 +303,5 @@ bool dml2_validate(const struct dc *in_dc,
*/
void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
unsigned int *fclk_change_support, unsigned int *dram_clk_change_support);
-
+void dml2_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2);
#endif //_DML2_WRAPPER_H_
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index aef73bd1221a..d0f8c9ff5232 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -153,58 +153,8 @@ void dpp401_set_cursor_position(
uint32_t height)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
- int x_pos = pos->x - param->recout.x;
- int y_pos = pos->y - param->recout.y;
- int x_hotspot = pos->x_hotspot;
- int y_hotspot = pos->y_hotspot;
- int rec_x_offset = x_pos - pos->x_hotspot;
- int rec_y_offset = y_pos - pos->y_hotspot;
- int cursor_height = (int)height;
- int cursor_width = (int)width;
uint32_t cur_en = pos->enable ? 1 : 0;
- // Transform cursor width / height and hotspots for offset calculations
- if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- swap(cursor_height, cursor_width);
- swap(x_hotspot, y_hotspot);
-
- if (param->rotation == ROTATION_ANGLE_90) {
- // hotspot = (-y, x)
- rec_x_offset = x_pos - (cursor_width - x_hotspot);
- rec_y_offset = y_pos - y_hotspot;
- } else if (param->rotation == ROTATION_ANGLE_270) {
- // hotspot = (y, -x)
- rec_x_offset = x_pos - x_hotspot;
- rec_y_offset = y_pos - (cursor_height - y_hotspot);
- }
- } else if (param->rotation == ROTATION_ANGLE_180) {
- // hotspot = (-x, -y)
- if (!param->mirror)
- rec_x_offset = x_pos - (cursor_width - x_hotspot);
-
- rec_y_offset = y_pos - (cursor_height - y_hotspot);
- }
-
- if (param->rotation == ROTATION_ANGLE_0 && !param->mirror) {
- if (rec_x_offset >= (int)param->recout.width)
- cur_en = 0; /* not visible beyond right edge*/
-
- if (rec_y_offset >= (int)param->recout.height)
- cur_en = 0; /* not visible beyond bottom edge*/
- } else {
- if (rec_x_offset > (int)param->recout.width)
- cur_en = 0; /* not visible beyond right edge*/
-
- if (rec_y_offset > (int)param->recout.height)
- cur_en = 0; /* not visible beyond bottom edge*/
- }
-
- if (rec_x_offset + cursor_width <= 0)
- cur_en = 0; /* not visible beyond left edge*/
-
- if (rec_y_offset + cursor_height <= 0)
- cur_en = 0; /* not visible beyond top edge*/
-
REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index d6b2334d5364..75128fd34306 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -32,16 +32,6 @@
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
-/* Object I/F functions */
-static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
-static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
-static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
- struct dsc_optc_config *dsc_optc_cfg);
-static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
-static void dsc2_disable(struct display_stream_compressor *dsc);
-static void dsc2_disconnect(struct display_stream_compressor *dsc);
-static void dsc2_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
-
static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
@@ -156,7 +146,7 @@ void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
/* this function reads DSC-related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
-static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
+void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
@@ -173,7 +163,7 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
}
-static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
+bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
struct dsc_optc_config dsc_optc_cfg;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
@@ -196,7 +186,7 @@ void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_conf
DC_LOG_DSC("\tcolor_depth %d", config->color_depth);
}
-static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg)
{
bool is_config_ok;
@@ -233,7 +223,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc
}
-static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
+void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
int dsc_clock_en;
@@ -258,7 +248,7 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
}
-static void dsc2_disable(struct display_stream_compressor *dsc)
+void dsc2_disable(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
int dsc_clock_en;
@@ -277,14 +267,14 @@ static void dsc2_disable(struct display_stream_compressor *dsc)
DSC_CLOCK_EN, 0);
}
-static void dsc2_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
+void dsc2_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
REG_WAIT(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING, 0, 2, 50000);
}
-static void dsc2_disconnect(struct display_stream_compressor *dsc)
+void dsc2_disconnect(struct display_stream_compressor *dsc)
{
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index a136b26c914c..1fb90b52b814 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -454,7 +454,9 @@
type DSCCIF_UPDATE_TAKEN_ACK; \
type DSCRM_DSC_FORWARD_EN; \
type DSCRM_DSC_OPP_PIPE_SOURCE; \
- type DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING
+ type DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
+ type DSCRM_DSC_FORWARD_EN_STATUS
+
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
@@ -597,5 +599,14 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc,
const struct dsc_config *dsc_cfg,
uint8_t *dsc_packed_pps);
+void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
+void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+ struct dsc_optc_config *dsc_optc_cfg);
+void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
+void dsc2_disable(struct display_stream_compressor *dsc);
+void dsc2_disconnect(struct display_stream_compressor *dsc);
+void dsc2_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 71d2dff9986d..6f4f5a3c4861 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
@@ -27,6 +27,20 @@
#include "dcn35_dsc.h"
#include "reg_helper.h"
+static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe);
+
+static const struct dsc_funcs dcn35_dsc_funcs = {
+ .dsc_get_enc_caps = dsc2_get_enc_caps,
+ .dsc_read_state = dsc2_read_state,
+ .dsc_validate_stream = dsc2_validate_stream,
+ .dsc_set_config = dsc2_set_config,
+ .dsc_get_packed_pps = dsc2_get_packed_pps,
+ .dsc_enable = dsc35_enable,
+ .dsc_disable = dsc2_disable,
+ .dsc_disconnect = dsc2_disconnect,
+ .dsc_wait_disconnect_pending_clear = dsc2_wait_disconnect_pending_clear,
+};
+
/* Macro definitions for REG_SET macros */
#define CTX \
dsc20->base.ctx
@@ -49,9 +63,47 @@ void dsc35_construct(struct dcn20_dsc *dsc,
const struct dcn35_dsc_shift *dsc_shift,
const struct dcn35_dsc_mask *dsc_mask)
{
- dsc2_construct(dsc, ctx, inst, dsc_regs,
- (const struct dcn20_dsc_shift *)(dsc_shift),
- (const struct dcn20_dsc_mask *)(dsc_mask));
+ dsc->base.ctx = ctx;
+ dsc->base.inst = inst;
+ dsc->base.funcs = &dcn35_dsc_funcs;
+
+ dsc->dsc_regs = dsc_regs;
+ dsc->dsc_shift = (const struct dcn20_dsc_shift *)(dsc_shift);
+ dsc->dsc_mask = (const struct dcn20_dsc_mask *)(dsc_mask);
+
+ dsc->max_image_width = 5184;
+}
+
+static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
+
+ DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
+
+ // TODO: After an idle exit, the HW default values for power control
+ // are changed intermittently for unknown reasons. There are cases
+ // where the DSCC memory is still in a shutdown state during enablement.
+ // Reset power control to hw default values.
+ REG_UPDATE_2(DSCC_MEM_POWER_CONTROL,
+ DSCC_MEM_PWR_FORCE, 0,
+ DSCC_MEM_PWR_DIS, 0);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
+
+ REG_UPDATE(DSC_TOP_CONTROL,
+ DSC_CLOCK_EN, 1);
+
+ REG_UPDATE_2(DSCRM_DSC_FORWARD_CONFIG,
+ DSCRM_DSC_FORWARD_EN, 1,
+ DSCRM_DSC_OPP_PIPE_SOURCE, opp_pipe);
}
void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable)
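
The dcn35 change inverts the construction: instead of calling dsc2_construct() and inheriting the dcn20 ops wholesale, it fills its own dcn35_dsc_funcs table, reusing the now-exported dsc2_* hooks for everything except dsc_enable, which gains the memory-power reset. The ops-table idiom, reduced to stand-ins:

#include <stdio.h>

struct demo_dsc_funcs {
	void (*enable)(void);
	void (*disable)(void);
};

static void base_disable(void) { puts("base disable"); }

static void v35_enable(void)
{
	/* variant-specific prep (e.g. resetting memory power control)
	 * would go here, then the common enable sequence */
	puts("v35 enable");
}

static const struct demo_dsc_funcs v35_funcs = {
	.enable  = v35_enable,		/* overridden */
	.disable = base_disable,	/* inherited from the base set */
};

int main(void)
{
	v35_funcs.enable();
	v35_funcs.disable();
	return 0;
}
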
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 52f23bb554af..6acb6699f146 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -208,7 +208,7 @@ static void dsc401_wait_disconnect_pending_clear(struct display_stream_compresso
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
- REG_WAIT(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING, 0, 2, 50000);
+ REG_WAIT(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN_STATUS, 0, 2, 50000);
}
static void dsc401_disconnect(struct display_stream_compressor *dsc)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
index 2143e81ca22a..3c9fa8988974 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
@@ -196,7 +196,8 @@
DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
+ DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh), \
+ DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN_STATUS, mask_sh)
struct dcn401_dsc_registers {
uint32_t DSC_TOP_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c
index 8901bd80f7d1..5c6f7ddafd6b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c
@@ -616,7 +616,8 @@ static bool hubbub2_program_watermarks(
hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false)
safe_to_lower = true;
- hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index 6a5af3da4b45..fe741100c0f8 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -339,6 +339,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
return false;
switch (dcc_control) {
+ case dcc_control__256_256:
case dcc_control__256_256_xxx:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 256;
@@ -346,6 +347,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1;
output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
break;
+ case dcc_control__256_128:
case dcc_control__128_128_xxx:
output->grph.rgb.max_uncompressed_blk_size = 128;
output->grph.rgb.max_compressed_blk_size = 128;
@@ -353,6 +355,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1;
output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
break;
+ case dcc_control__256_64:
case dcc_control__256_64_64:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 64;
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index b906db6e7355..7fb5523f9722 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -866,6 +866,7 @@ static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
return false;
switch (dcc_control) {
+ case dcc_control__256_256:
case dcc_control__256_256_xxx:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 256;
@@ -881,12 +882,14 @@ static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
break;
case dcc_control__256_64_64:
+ case dcc_control__256_64:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
output->grph.rgb.dcc_controls.dcc_256_64_64 = 1;
break;
case dcc_control__256_128_128:
+ case dcc_control__256_128:
output->grph.rgb.max_uncompressed_blk_size = 256;
output->grph.rgb.max_compressed_blk_size = 128;
output->grph.rgb.independent_64b_blks = false;
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 054607c944a3..5126d603f0b1 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -824,6 +824,285 @@ void hubbub401_det_request_size(
}
}
}
+bool hubbub401_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ struct dc *dc = hubbub->ctx->dc;
+ /* DCN4_Programming_Guide_DCHUB.docx, Section 5.11.2.2 */
+ enum dcc_control dcc_control;
+ unsigned int plane0_bpe, plane1_bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ enum segment_order p1_segment_order_horz, p1_segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+ unsigned int plane0_width = 0, plane0_height = 0, plane1_width = 0, plane1_height = 0;
+ bool p1_req128_horz_wc, p1_req128_vert_wc, is_dual_plane;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ switch (input->format) {
+ default:
+ is_dual_plane = false;
+
+ plane1_width = 0;
+ plane1_height = 0;
+
+ if (input->surface_size.width > 6144 + 16)
+ plane0_width = 6160;
+ else
+ plane0_width = input->surface_size.width;
+
+ if (input->surface_size.height > 6144 + 16)
+ plane0_height = 6160;
+ else
+ plane0_height = input->surface_size.height;
+
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ is_dual_plane = true;
+
+ if (input->surface_size.width > 7680 + 16)
+ plane0_width = 7696;
+ else
+ plane0_width = input->surface_size.width;
+
+ if (input->surface_size.height > 4320 + 16)
+ plane0_height = 4336;
+ else
+ plane0_height = input->surface_size.height;
+
+ if (input->plane1_size.width > 7680 + 16)
+ plane1_width = 7696 / 2;
+ else
+ plane1_width = input->plane1_size.width;
+
+ if (input->plane1_size.height > 4320 + 16)
+ plane1_height = 4336 / 2;
+ else
+ plane1_height = input->plane1_size.height;
+
+ break;
+
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ is_dual_plane = true;
+
+ if (input->surface_size.width > 5120 + 16)
+ plane0_width = 5136;
+ else
+ plane0_width = input->surface_size.width;
+
+ if (input->surface_size.height > 5120 + 16)
+ plane0_height = 5136;
+ else
+ plane0_height = input->surface_size.height;
+
+ if (input->plane1_size.width > 5120 + 16)
+ plane1_width = 5136;
+ else
+ plane1_width = input->plane1_size.width;
+
+ if (input->plane1_size.height > 5120 + 16)
+ plane1_height = 5136;
+ else
+ plane1_height = input->plane1_size.height;
+
+ break;
+ }
+
+ if (!hubbub->funcs->dcc_support_pixel_format_plane0_plane1(input->format,
+ &plane0_bpe, &plane1_bpe))
+ return false;
+
+ /* Find plane0 DCC Controls */
+ if (!is_dual_plane) {
+
+ if (!hubbub->funcs->dcc_support_swizzle_addr3(input->swizzle_mode_addr3,
+ input->plane0_pitch, plane0_bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ hubbub401_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, input->format,
+ plane0_height, plane0_width, plane0_bpe,
+ plane1_height, plane1_width, plane1_bpe,
+ &req128_horz_wc, &req128_vert_wc, &p1_req128_horz_wc, &p1_req128_vert_wc);
+
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__256_128;
+ else
+ dcc_control = dcc_control__256_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__256_128;
+ else
+ dcc_control = dcc_control__256_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64;
+ else
+ /* req128 is true for either horz or vert
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__256_128;
+ }
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256:
+ output->grph.rgb.dcc_controls.dcc_256_256 = 1;
+ output->grph.rgb.dcc_controls.dcc_256_128 = 1;
+ output->grph.rgb.dcc_controls.dcc_256_64 = 1;
+ break;
+ case dcc_control__256_128:
+ output->grph.rgb.dcc_controls.dcc_256_128 = 1;
+ output->grph.rgb.dcc_controls.dcc_256_64 = 1;
+ break;
+ case dcc_control__256_64:
+ output->grph.rgb.dcc_controls.dcc_256_64 = 1;
+ break;
+ default:
+ /* Shouldn't get here */
+ ASSERT(0);
+ break;
+ }
+ } else {
+ /* For dual plane cases, need to examine both planes together */
+ if (!hubbub->funcs->dcc_support_swizzle_addr3(input->swizzle_mode_addr3,
+ input->plane0_pitch, plane0_bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ if (!hubbub->funcs->dcc_support_swizzle_addr3(input->swizzle_mode_addr3,
+ input->plane1_pitch, plane1_bpe,
+ &p1_segment_order_horz, &p1_segment_order_vert))
+ return false;
+
+ hubbub401_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, input->format,
+ plane0_height, plane0_width, plane0_bpe,
+ plane1_height, plane1_width, plane1_bpe,
+ &req128_horz_wc, &req128_vert_wc, &p1_req128_horz_wc, &p1_req128_vert_wc);
+
+ /* Determine Plane 0 DCC Controls */
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__256_128;
+ else
+ dcc_control = dcc_control__256_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__256_128;
+ else
+ dcc_control = dcc_control__256_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64;
+ else
+ /* req128 is true for either horz or vert,
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__256_128;
+ }
+
+ switch (dcc_control) {
+ case dcc_control__256_256:
+ output->video.luma.dcc_controls.dcc_256_256 = 1;
+ output->video.luma.dcc_controls.dcc_256_128 = 1;
+ output->video.luma.dcc_controls.dcc_256_64 = 1;
+ break;
+ case dcc_control__256_128:
+ output->video.luma.dcc_controls.dcc_256_128 = 1;
+ output->video.luma.dcc_controls.dcc_256_64 = 1;
+ break;
+ case dcc_control__256_64:
+ output->video.luma.dcc_controls.dcc_256_64 = 1;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ /* Determine Plane 1 DCC Controls */
+ if (!p1_req128_horz_wc && !p1_req128_vert_wc) {
+ dcc_control = dcc_control__256_256;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!p1_req128_horz_wc)
+ dcc_control = dcc_control__256_256;
+ else if (p1_segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__256_128;
+ else
+ dcc_control = dcc_control__256_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!p1_req128_vert_wc)
+ dcc_control = dcc_control__256_256;
+ else if (p1_segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__256_128;
+ else
+ dcc_control = dcc_control__256_64;
+ } else {
+ if ((p1_req128_horz_wc &&
+ p1_segment_order_horz == segment_order__non_contiguous) ||
+ (p1_req128_vert_wc &&
+ p1_segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64;
+ else
+ /* req128 is true for either horz or vert,
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__256_128;
+ }
+
+ switch (dcc_control) {
+ case dcc_control__256_256:
+ output->video.chroma.dcc_controls.dcc_256_256 = 1;
+ output->video.chroma.dcc_controls.dcc_256_128 = 1;
+ output->video.chroma.dcc_controls.dcc_256_64 = 1;
+ break;
+ case dcc_control__256_128:
+ output->video.chroma.dcc_controls.dcc_256_128 = 1;
+ output->video.chroma.dcc_controls.dcc_256_64 = 1;
+ break;
+ case dcc_control__256_64:
+ output->video.chroma.dcc_controls.dcc_256_64 = 1;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ output->capable = true;
+ return true;
+}
static void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg)
{
@@ -891,6 +1170,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.init_vm_ctx = hubbub2_init_vm_ctx,
.dcc_support_swizzle_addr3 = hubbub401_dcc_support_swizzle,
.dcc_support_pixel_format_plane0_plane1 = hubbub401_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub401_get_dcc_compression_cap,
.wm_read_state = hubbub401_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub401_program_watermarks,
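For reference, the per-plane selection logic in hubbub401_get_dcc_compression_cap() above reduces to a small pure function. The sketch below restates it outside the driver; the enum and parameter names are hypothetical stand-ins for the driver's dcc_control, segment_order, and scan-direction types, so treat it as illustrative only, not the driver's API.

#include <stdbool.h>

enum ctl { CTL_256_256, CTL_256_128, CTL_256_64 };
enum seg { SEG_CONTIGUOUS, SEG_NON_CONTIGUOUS };
enum scan { SCAN_UNKNOWN, SCAN_HORZ, SCAN_VERT };

/* Pick the least constrained DCC request size that is still safe for
 * the given scan direction and detile-buffer wrap-compression results. */
static enum ctl pick_dcc_control(bool req128_horz, bool req128_vert,
                                 enum seg order_horz, enum seg order_vert,
                                 enum scan dir)
{
        if (!req128_horz && !req128_vert)
                return CTL_256_256;     /* full 256B requests fit */
        if (dir == SCAN_HORZ)
                return !req128_horz ? CTL_256_256 :
                       order_horz == SEG_CONTIGUOUS ? CTL_256_128 : CTL_256_64;
        if (dir == SCAN_VERT)
                return !req128_vert ? CTL_256_256 :
                       order_vert == SEG_CONTIGUOUS ? CTL_256_128 : CTL_256_64;
        /* access direction unknown: take the most constraining setting */
        if ((req128_horz && order_horz == SEG_NON_CONTIGUOUS) ||
            (req128_vert && order_vert == SEG_NON_CONTIGUOUS))
                return CTL_256_64;
        return CTL_256_128;
}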
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h
index d8a57f64a70c..f35f19ba3e18 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h
@@ -180,6 +180,11 @@ void hubbub401_det_request_size(
bool *p0_req128_vert_wc,
bool *p1_req128_horz_wc,
bool *p1_req128_vert_wc);
+bool hubbub401_get_dcc_compression_cap(
+ struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
struct dc_context *ctx,
const struct dcn_hubbub_registers *hubbub_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index ecc0a2f37938..18e194507e36 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -175,6 +175,8 @@
uint32_t HUBP_3DLUT_ADDRESS_LOW;\
uint32_t HUBP_3DLUT_CONTROL;\
uint32_t HUBP_3DLUT_DLG_PARAM;\
+ uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\
+ uint32_t DCHUBP_MCACHEID_CONFIG
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
@@ -269,6 +271,18 @@
type HUBP_3DLUT_ADDRESS_HIGH;\
type HUBP_3DLUT_ADDRESS_LOW;\
type REFCYC_PER_3DLUT_GROUP;\
+ type VIEWPORT_MCACHE_SPLIT_COORDINATE;\
+ type VIEWPORT_MCACHE_SPLIT_COORDINATE_C;\
+ type MCACHEID_REG_READ_1H_P0;\
+ type MCACHEID_REG_READ_2H_P0;\
+ type MCACHEID_REG_READ_1H_P1;\
+ type MCACHEID_REG_READ_2H_P1;\
+ type MCACHEID_MALL_PREF_1H_P0;\
+ type MCACHEID_MALL_PREF_2H_P0;\
+ type MCACHEID_MALL_PREF_1H_P1;\
+ type MCACHEID_MALL_PREF_2H_P1
+
struct dcn_hubp2_registers {
DCN401_HUBP_REG_COMMON_VARIABLE_LIST;
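The field lists above follow DC's X-macro convention: the same list is instantiated once with a narrow type to build a shift table and once with uint32_t to build a mask table. A minimal stand-alone illustration of the pattern, with a hypothetical demo macro name:

#include <stdint.h>

#define DEMO_REG_FIELD_LIST(type) \
        type VIEWPORT_MCACHE_SPLIT_COORDINATE; \
        type VIEWPORT_MCACHE_SPLIT_COORDINATE_C

struct demo_shift { DEMO_REG_FIELD_LIST(uint8_t); };   /* bit offsets */
struct demo_mask  { DEMO_REG_FIELD_LIST(uint32_t); };  /* bit masks  */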
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 6692d57d5cce..f0c45a74c2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -626,6 +626,26 @@ void hubp401_set_viewport(
SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
+void hubp401_program_mcache_id_and_split_coordinate(
+ struct hubp *hubp,
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_SET_8(DCHUBP_MCACHEID_CONFIG, 0,
+ MCACHEID_REG_READ_1H_P0, mcache_regs->main.p0.mcache_id_first,
+ MCACHEID_REG_READ_2H_P0, mcache_regs->main.p0.mcache_id_second,
+ MCACHEID_REG_READ_1H_P1, mcache_regs->main.p1.mcache_id_first,
+ MCACHEID_REG_READ_2H_P1, mcache_regs->main.p1.mcache_id_second,
+ MCACHEID_MALL_PREF_1H_P0, mcache_regs->mall.p0.mcache_id_first,
+ MCACHEID_MALL_PREF_2H_P0, mcache_regs->mall.p0.mcache_id_second,
+ MCACHEID_MALL_PREF_1H_P1, mcache_regs->mall.p1.mcache_id_first,
+ MCACHEID_MALL_PREF_2H_P1, mcache_regs->mall.p1.mcache_id_second);
+
+ REG_SET_2(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, 0,
+ VIEWPORT_MCACHE_SPLIT_COORDINATE, mcache_regs->main.p0.split_location,
+ VIEWPORT_MCACHE_SPLIT_COORDINATE_C, mcache_regs->main.p1.split_location);
+}
void hubp401_set_flip_int(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -654,13 +674,11 @@ void hubp401_cursor_set_position(
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
int x_pos = pos->x - param->recout.x;
int y_pos = pos->y - param->recout.y;
- int x_hotspot = pos->x_hotspot;
- int y_hotspot = pos->y_hotspot;
int rec_x_offset = x_pos - pos->x_hotspot;
int rec_y_offset = y_pos - pos->y_hotspot;
- int cursor_height = (int)hubp->curs_attr.height;
- int cursor_width = (int)hubp->curs_attr.width;
- uint32_t dst_x_offset;
+ int dst_x_offset;
+ int x_pos_viewport = x_pos * param->viewport.width / param->recout.width;
+ int x_hot_viewport = pos->x_hotspot * param->viewport.width / param->recout.width;
uint32_t cur_en = pos->enable ? 1 : 0;
hubp->curs_pos = *pos;
@@ -672,29 +690,13 @@ void hubp401_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
- // Transform cursor width / height and hotspots for offset calculations
- if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- swap(cursor_height, cursor_width);
- swap(x_hotspot, y_hotspot);
-
- if (param->rotation == ROTATION_ANGLE_90) {
- // hotspot = (-y, x)
- rec_x_offset = x_pos - (cursor_width - x_hotspot);
- rec_y_offset = y_pos - y_hotspot;
- } else if (param->rotation == ROTATION_ANGLE_270) {
- // hotspot = (y, -x)
- rec_x_offset = x_pos - x_hotspot;
- rec_y_offset = y_pos - (cursor_height - y_hotspot);
- }
- } else if (param->rotation == ROTATION_ANGLE_180) {
- // hotspot = (-x, -y)
- if (!param->mirror)
- rec_x_offset = x_pos - (cursor_width - x_hotspot);
-
- rec_y_offset = y_pos - (cursor_height - y_hotspot);
- }
-
- dst_x_offset = (rec_x_offset >= 0) ? rec_x_offset : 0;
+ /* Translate the x position of the cursor from rect
+ * space into viewport space. CURSOR_DST_X_OFFSET
+ * is the offset relative to viewport start position.
+ */
+ dst_x_offset = x_pos_viewport - x_hot_viewport *
+ (1 + hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION);
+ dst_x_offset = (dst_x_offset >= 0) ? dst_x_offset : 0;
dst_x_offset *= param->ref_clk_khz;
dst_x_offset /= param->pixel_clk_khz;
@@ -705,18 +707,6 @@ void hubp401_cursor_set_position(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
- if (rec_x_offset >= (int)param->recout.width)
- cur_en = 0; /* not visible beyond right edge*/
-
- if (rec_x_offset + cursor_width <= 0)
- cur_en = 0; /* not visible beyond left edge*/
-
- if (rec_y_offset >= (int)param->recout.height)
- cur_en = 0; /* not visible beyond bottom edge*/
-
- if (rec_y_offset + cursor_height <= 0)
- cur_en = 0; /* not visible beyond top edge*/
-
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
@@ -993,6 +983,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp401_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
+ .hubp_program_mcache_id_and_split_coordinate = hubp401_program_mcache_id_and_split_coordinate,
.hubp_update_3dlut_fl_bias_scale = hubp401_update_3dlut_fl_bias_scale,
.hubp_program_3dlut_fl_mode = hubp401_program_3dlut_fl_mode,
.hubp_program_3dlut_fl_format = hubp401_program_3dlut_fl_format,
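The cursor change above replaces the rotation-specific offset math with a recout-to-viewport translation. Restated as a self-contained helper using the same arithmetic as the hunk (integer scaling by viewport/recout width, doubling the hotspot when 2x magnification is enabled, then converting pixels to ref-clock cycles); the function and parameter names are illustrative:

static int cursor_dst_x_offset(int x_pos, int x_hotspot,
                               int viewport_width, int recout_width,
                               int magnify_2x_enabled,
                               int ref_clk_khz, int pixel_clk_khz)
{
        int x_pos_vp = x_pos * viewport_width / recout_width;
        int x_hot_vp = x_hotspot * viewport_width / recout_width;
        int offset = x_pos_vp - x_hot_vp * (1 + magnify_2x_enabled);

        if (offset < 0)
                offset = 0;             /* clamp to viewport start */
        /* CURSOR_DST_X_OFFSET is programmed in ref-clock cycles */
        return offset * ref_clk_khz / pixel_clk_khz;
}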
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index e0cec898a2c0..e52fdb5b0cd0 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -243,6 +243,16 @@
HUBP_SF(CURSOR0_0_HUBP_3DLUT_ADDRESS_HIGH, HUBP_3DLUT_ADDRESS_HIGH, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_ADDRESS_LOW, HUBP_3DLUT_ADDRESS_LOW, mask_sh),\
HUBP_SF(CURSOR0_0_HUBP_3DLUT_DLG_PARAM, REFCYC_PER_3DLUT_GROUP, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, VIEWPORT_MCACHE_SPLIT_COORDINATE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, VIEWPORT_MCACHE_SPLIT_COORDINATE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_REG_READ_1H_P0, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_REG_READ_2H_P0, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_REG_READ_1H_P1, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_REG_READ_2H_P1, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P0, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P0, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P1, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh)
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
@@ -302,7 +312,9 @@ void hubp401_program_surface_config(
void hubp401_set_viewport(struct hubp *hubp,
const struct rect *viewport,
const struct rect *viewport_c);
-
+void hubp401_program_mcache_id_and_split_coordinate(
+ struct hubp *hubp,
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs);
void hubp401_set_flip_int(struct hubp *hubp);
bool hubp401_in_blank(struct hubp *hubp);
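For context, the REG_SET_8() call in hubp401_program_mcache_id_and_split_coordinate() packs eight 4-bit mcache IDs into one register write. Using the DCHUBP_MCACHEID_CONFIG shifts defined later in this patch (dcn_4_1_0_sh_mask.h), the resulting value is equivalent to the sketch below; the helper itself is hypothetical:

#include <stdint.h>

static uint32_t pack_mcacheid_config(uint8_t reg_read_1h_p0, uint8_t reg_read_2h_p0,
                                     uint8_t reg_read_1h_p1, uint8_t reg_read_2h_p1,
                                     uint8_t mall_pref_1h_p0, uint8_t mall_pref_2h_p0,
                                     uint8_t mall_pref_1h_p1, uint8_t mall_pref_2h_p1)
{
        /* each ID occupies a nibble; shifts 0/4/8/12 for register reads,
         * 16/20/24/28 for MALL prefetch, per the sh_mask header */
        return ((uint32_t)(reg_read_1h_p0 & 0xf) << 0)  |
               ((uint32_t)(reg_read_2h_p0 & 0xf) << 4)  |
               ((uint32_t)(reg_read_1h_p1 & 0xf) << 8)  |
               ((uint32_t)(reg_read_2h_p1 & 0xf) << 12) |
               ((uint32_t)(mall_pref_1h_p0 & 0xf) << 16) |
               ((uint32_t)(mall_pref_2h_p0 & 0xf) << 20) |
               ((uint32_t)(mall_pref_1h_p1 & 0xf) << 24) |
               ((uint32_t)(mall_pref_2h_p1 & 0xf) << 28);
}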
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 35151dd056cb..4ef329a4d764 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1134,26 +1134,9 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
{
struct hubp *hubp;
unsigned int i;
- bool need_recover = true;
if (!dc->debug.recovery_enabled)
return false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx != NULL) {
- hubp = pipe_ctx->plane_res.hubp;
- if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
- if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
- /* one pipe underflow, we will reset all the pipes*/
- need_recover = true;
- }
- }
- }
- }
- if (!need_recover)
- return false;
/*
DCHUBP_CNTL:HUBP_BLANK_EN=1
DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 82d1ded09561..2532ad410cb5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -974,8 +974,8 @@ enum dc_status dcn20_enable_stream_timing(
/* TODO enable stream if timing changed */
/* TODO unblank stream if DP */
- if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
- if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
+ if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
@@ -1753,6 +1753,10 @@ static void dcn20_update_dchubp_dpp(
&pipe_ctx->plane_res.scl_data.viewport_c);
viewport_changed = true;
}
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(
+ hubp,
+ &pipe_ctx->mcache_regs);
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
@@ -1827,8 +1831,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
/* If the stream paired with this plane is phantom, the plane is also phantom */
- if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM
- && hubp->funcs->phantom_hubp_post_enable)
+ if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
hubp->funcs->phantom_hubp_post_enable(hubp);
}
@@ -2228,6 +2231,29 @@ void dcn20_post_unlock_program_front_end(
}
}
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* When going from a smaller ODM slice count to a larger one, wait for the
+ * double-buffer update to complete before returning, so that DISPCLK is
+ * not reduced before the transition to 2:1 or 4:1 has taken effect.
+ */
+ if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ int j = 0;
+ struct timing_generator *tg = pipe->stream_res.tg;
+
+ if (tg->funcs->get_double_buffer_pending) {
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
+ && tg->funcs->get_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+ }
+ }
+
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
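The ODM wait added to dcn20_post_unlock_program_front_end() above is a bounded poll: keep checking until the OTG reports no pending double-buffer update, giving up once the TIMEOUT_FOR_PIPE_ENABLE_US budget is spent. The generic shape of that loop, with function pointers standing in for tg->funcs->get_double_buffer_pending() and udelay():

static void poll_until_clear(bool (*pending)(void *ctx), void *ctx,
                             unsigned int timeout_us, unsigned int interval_us,
                             void (*delay_us)(unsigned int us))
{
        unsigned int waited_us;

        /* stop when the condition clears or the time budget runs out */
        for (waited_us = 0;
             waited_us < timeout_us && pending(ctx);
             waited_us += interval_us)
                delay_us(interval_us);
}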
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index bcacfd893cf7..eaeeade31ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -228,8 +228,11 @@ bool dcn30_set_blend_lut(
if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
blend_lut = &plane_state->blend_tf.pwl;
else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm3_helper_translate_curve_to_hw_format(
+ result = cm3_helper_translate_curve_to_hw_format(
&plane_state->blend_tf, &dpp_base->regamma_params, false);
+ if (!result)
+ return result;
+
blend_lut = &dpp_base->regamma_params;
}
result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index bdbb4a71651f..05d8f81daa06 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -483,11 +483,14 @@ bool dcn32_set_mcm_luts(
if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
lut_params = &plane_state->blend_tf.pwl;
else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
+ result = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
&dpp_base->regamma_params, false);
+ if (!result)
+ return result;
+
lut_params = &dpp_base->regamma_params;
}
- result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
+ mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
lut_params = NULL;
// Shaper
@@ -501,7 +504,7 @@ bool dcn32_set_mcm_luts(
lut_params = &dpp_base->shaper_params;
}
- result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
+ mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
// 3D
if (plane_state->lut3d_func.state.bits.initialized == 1)
@@ -1254,6 +1257,8 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
+
+ dc_trigger_sync(dc, dc->current_state);
}
void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 7a456618f313..11570ef06086 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -408,6 +408,8 @@ void dcn401_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
+ dcn401_setup_hpo_hw_control(hws, true);
+
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -500,7 +502,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
/* 1D LUT */
- if (mcm_luts.lut1d_func) {
+ if (mcm_luts.lut1d_func && lut3d_xable != MCM_LUT_DISABLE) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
@@ -672,7 +674,7 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
// 1D LUT
- if (!plane_state->mcm_lut1d_enable) {
+ if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) {
if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
lut_params = &plane_state->blend_tf.pwl;
else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
@@ -916,8 +918,8 @@ enum dc_status dcn401_enable_stream_timing(
/* TODO enable stream if timing changed */
/* TODO unblank stream if DP */
- if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
- if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
+ if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
@@ -1099,31 +1101,21 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
- .mirror = pipe_ctx->plane_state->horizontal_mirror
+ .mirror = pipe_ctx->plane_state->horizontal_mirror,
+ .stream = pipe_ctx->stream
};
- bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
(pipe_ctx->prev_odm_pipe != NULL);
int prev_odm_width = 0;
int prev_odm_offset = 0;
- int next_odm_width = 0;
- int next_odm_offset = 0;
- struct pipe_ctx *next_odm_pipe = NULL;
struct pipe_ctx *prev_odm_pipe = NULL;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ int recout_x_pos = 0;
+ int recout_y_pos = 0;
- if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
- if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
- (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
- pipe_split_on = true;
- }
- }
-
-
- /**
- * DCN4 moved cursor composition after Scaler, so in HW it is in
+ /* DCN4 moved cursor composition after Scaler, so in HW it is in
* recout space, and HW cursor position programming needs to
* translate into recout space.
*
@@ -1148,8 +1140,7 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
pipe_ctx->stream->src.height;
- /**
- * If the cursor's source viewport is clipped then we need to
+ /* If the cursor's source viewport is clipped then we need to
* translate the cursor to appear in the correct position on
* the screen.
*
@@ -1169,32 +1160,32 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
* next/prev_odm_offset is to account for scaled modes that have underscan
*/
if (odm_combine_on) {
- next_odm_pipe = pipe_ctx->next_odm_pipe;
prev_odm_pipe = pipe_ctx->prev_odm_pipe;
- while (next_odm_pipe != NULL) {
- next_odm_width += next_odm_pipe->plane_res.scl_data.recout.width;
- next_odm_offset += next_odm_pipe->plane_res.scl_data.recout.x;
- next_odm_pipe = next_odm_pipe->next_odm_pipe;
- }
while (prev_odm_pipe != NULL) {
prev_odm_width += prev_odm_pipe->plane_res.scl_data.recout.width;
prev_odm_offset += prev_odm_pipe->plane_res.scl_data.recout.x;
prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
}
- if (param.rotation == ROTATION_ANGLE_0) {
- x_pos -= (prev_odm_width + prev_odm_offset);
- }
+ x_pos -= (prev_odm_width + prev_odm_offset);
}
- /**
- * If the position is negative then we need to add to the hotspot
- * to shift the cursor outside the plane.
+ /* If the position is negative then we need to add to the hotspot
+ * to fix cursor size between ODM slices
*/
if (x_pos < 0) {
pos_cpy.x_hotspot -= x_pos;
+ if ((odm_combine_on) && (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)) {
+ if (hubp->curs_attr.width <= 128) {
+ pos_cpy.x_hotspot /= 2;
+ pos_cpy.x_hotspot += 1;
+ } else {
+ pos_cpy.x_hotspot /= 2;
+ pos_cpy.x_hotspot += 2;
+ }
+ }
x_pos = 0;
}
@@ -1209,86 +1200,23 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (pos_cpy.enable && dcn401_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
- if (param.rotation == ROTATION_ANGLE_270) {
- // Swap axis and mirror vertically
- uint32_t temp_x = pos_cpy.x;
+ x_pos = pos_cpy.x - param.recout.x;
+ y_pos = pos_cpy.y - param.recout.y;
- int recout_height =
- pipe_ctx->plane_res.scl_data.recout.height;
- int recout_y =
- pipe_ctx->plane_res.scl_data.recout.y;
+ recout_x_pos = x_pos - pos_cpy.x_hotspot;
+ recout_y_pos = y_pos - pos_cpy.y_hotspot;
- /**
- * Display groups that are 1xnY, have pos_cpy.x > 2 * recout.height
- * For pipe split cases:
- * - apply offset of recout.y to normalize pos_cpy.x
- * - calculate the pos_cpy.y as before
- * - shift pos_cpy.y back by same offset to get final value
- * - since we iterate through both pipes, use the lower
- * recout.y for offset
- * For non pipe split cases, use the same calculation for
- * pos_cpy.y as the 180 degree rotation case below,
- * but use pos_cpy.x as our input because we are rotating
- * 270 degrees
- */
- if (pipe_split_on || odm_combine_on) {
- int pos_cpy_x_offset;
- int other_pipe_recout_y;
-
- if (pipe_split_on) {
- if (pipe_ctx->bottom_pipe) {
- other_pipe_recout_y =
- pipe_ctx->bottom_pipe->plane_res.scl_data.recout.y;
- } else {
- other_pipe_recout_y =
- pipe_ctx->top_pipe->plane_res.scl_data.recout.y;
- }
- pos_cpy_x_offset = (recout_y > other_pipe_recout_y) ?
- other_pipe_recout_y : recout_y;
- pos_cpy.x -= pos_cpy_x_offset;
- if (pos_cpy.x > recout_height) {
- pos_cpy.x = pos_cpy.x - recout_height;
- pos_cpy.y = recout_height - pos_cpy.x;
- } else {
- pos_cpy.y = 2 * recout_height - pos_cpy.x;
- }
- pos_cpy.y += pos_cpy_x_offset;
+ if (recout_x_pos >= (int)param.recout.width)
+ pos_cpy.enable = false; /* not visible beyond right edge */
- } else {
- pos_cpy.x = pipe_ctx->plane_res.scl_data.recout.width + next_odm_width + next_odm_offset - pos_cpy.y;
- pos_cpy.y = temp_x;
- }
- }
- } else if (param.rotation == ROTATION_ANGLE_180) {
- // Mirror horizontally and vertically
- int recout_width =
- pipe_ctx->plane_res.scl_data.recout.width;
- int recout_x =
- pipe_ctx->plane_res.scl_data.recout.x;
-
- if (!param.mirror) {
- if (odm_combine_on) {
- pos_cpy.x = pipe_ctx->plane_res.scl_data.recout.width + next_odm_width - pos_cpy.x;
- } else if (pipe_split_on) {
- if (pos_cpy.x >= recout_width + recout_x) {
- pos_cpy.x = 2 * recout_width
- - pos_cpy.x + 2 * recout_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * recout_x - pos_cpy.x;
- if (temp_x >= recout_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + recout_width;
- }
- }
- }
+ if (recout_y_pos >= (int)param.recout.height)
+ pos_cpy.enable = false; /* not visible beyond bottom edge*/
- }
+ if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
+ pos_cpy.enable = false; /* not visible beyond left edge */
- }
+ if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
+ pos_cpy.enable = false; /* not visible beyond top edge */
hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
@@ -1408,6 +1336,31 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
return true;
}
+void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
+ const struct pipe_ctx *top_pipe)
+{
+ bool is_wait_needed = false;
+ const struct pipe_ctx *pipe_ctx = top_pipe;
+
+ /* check if any surfaces are updating address while using flip immediate and dcc */
+ while (pipe_ctx != NULL) {
+ if (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->dcc.enable &&
+ pipe_ctx->plane_state->flip_immediate &&
+ pipe_ctx->plane_state->update_flags.bits.addr_update) {
+ is_wait_needed = true;
+ break;
+ }
+
+ /* check next pipe */
+ pipe_ctx = pipe_ctx->bottom_pipe;
+ }
+
+ if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
+ udelay(dc->debug.dcc_meta_propagation_delay_us);
+ }
+}
+
void dcn401_prepare_bandwidth(struct dc *dc,
struct dc_state *context)
{
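dcn401_wait_for_dcc_meta_propagation() is exported through the hw_sequencer function table (see dcn401_init.c below), so call sites would be expected to guard the optional pointer before invoking it. A plausible call-site sketch, not taken verbatim from this patch:

static void maybe_wait_for_dcc_meta_propagation(struct dc *dc,
                const struct pipe_ctx *top_pipe_to_program)
{
        /* hook is NULL on ASICs that don't need the delay; the delay
         * length itself comes from dc->debug.dcc_meta_propagation_delay_us */
        if (dc->hwss.wait_for_dcc_meta_propagation)
                dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
}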
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index bada43d4b2eb..c1d4287d5a0d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -61,6 +61,8 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable);
struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc,
uint8_t region);
+void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
+ const struct pipe_ctx *top_pipe_to_program);
void dcn401_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index 1cf0608e1980..6a768702c7bd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -94,6 +94,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.update_dsc_pg = dcn32_update_dsc_pg,
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
.blank_phantom = dcn32_blank_phantom,
+ .wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
.fams2_global_control_lock = dcn401_fams2_global_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
@@ -137,7 +138,6 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
.update_force_pstate = dcn32_update_force_pstate,
.update_mall_sel = dcn32_update_mall_sel,
- .setup_hpo_hw_control = dcn401_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index e9b85884edce..d05be65a2256 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -141,6 +141,11 @@ struct subvp_save_surf_addr {
uint8_t subvp_index;
};
+struct wait_for_dcc_meta_propagation_params {
+ const struct dc *dc;
+ const struct pipe_ctx *top_pipe_to_program;
+};
+
struct fams2_global_control_lock_fast_params {
struct dc *dc;
bool is_required;
@@ -165,6 +170,7 @@ union block_sequence_params {
struct set_output_csc_params set_output_csc_params;
struct set_ocsc_default_params set_ocsc_default_params;
struct subvp_save_surf_addr subvp_save_surf_addr;
+ struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params;
struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params;
};
@@ -186,6 +192,7 @@ enum block_sequence_func {
MPC_SET_OUTPUT_CSC,
MPC_SET_OCSC_DEFAULT,
DMUB_SUBVP_SAVE_SURF_ADDR,
+ HUBP_WAIT_FOR_DCC_META_PROP,
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
};
@@ -443,6 +450,8 @@ struct hw_sequencer_funcs {
bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx);
+ void (*wait_for_dcc_meta_propagation)(const struct dc *dc,
+ const struct pipe_ctx *top_pipe_to_program);
void (*fams2_global_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock);
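With the new HUBP_WAIT_FOR_DCC_META_PROP enum entry and params union member, a fast-path block-sequence step would plausibly be queued the same way as the existing DMUB_SUBVP_SAVE_SURF_ADDR steps. Sketch under that assumption (block_sequence, num_steps, and top_pipe_to_program are the usual locals in the sequence builder):

block_sequence[num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc;
block_sequence[num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program =
        top_pipe_to_program;
block_sequence[num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP;
num_steps++;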
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f58c27ad8b3e..4c8e6436c7e1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -97,6 +97,9 @@ struct resource_funcs {
unsigned int (*calculate_mall_ways_from_bytes)(
const struct dc *dc,
unsigned int total_size_in_mall_bytes);
+ void (*prepare_mcache_programming)(
+ struct dc *dc,
+ struct dc_state *context);
/**
* @populate_dml_pipes - Populate pipe data struct
*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index a73cb8f731b3..dd2b2864876c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -40,6 +40,10 @@ enum dcc_control {
dcc_control__128_128_xxx,
dcc_control__256_64_64,
dcc_control__256_128_128,
+ dcc_control__256_256,
+ dcc_control__256_128,
+ dcc_control__256_64,
};
enum segment_order {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index bcd7b22a1627..16580d624278 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -257,6 +257,7 @@ struct hubp_funcs {
unsigned int min_dst_y_next_start_optimized);
void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
+ void (*hubp_program_mcache_id_and_split_coordinate)(struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs);
void (*hubp_update_3dlut_fl_bias_scale)(struct hubp *hubp, uint16_t bias, uint16_t scale);
void (*hubp_program_3dlut_fl_mode)(struct hubp *hubp,
enum hubp_3dlut_fl_mode mode);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index cd4826f329c1..0f453452234c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -340,6 +340,7 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
+ bool (*get_double_buffer_pending)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 8d1a1cc94a8b..555c1c484cfd 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -853,7 +853,7 @@ bool dp_set_test_pattern(
CRTC_STATE_VACTIVE);
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
- if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_lock(pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 8073fdae9cb1..8246006857b3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -610,14 +610,14 @@ static bool construct_phy(struct dc_link *link,
link->link_enc =
link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
- DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
- DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
-
if (!link->link_enc) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
+ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+
/* Update link encoder tracking variables. These are used for the dynamic
* assignment of link encoders to streams.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index f1cac74dd7f7..46bb7a855bc2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -409,9 +409,6 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
case LINK_RATE_HIGH3:
lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
break;
- default:
- // Assume all LTTPRs support up to HBR3 to improve misbehaving sink interop
- lttpr_max_link_rate = LINK_RATE_HIGH3;
}
if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
@@ -2137,15 +2134,19 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
if (dp_is_lttpr_present(link)) {
- if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
- lttpr_max_link_rate = get_lttpr_max_link_rate(link);
- if (lttpr_max_link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate = lttpr_max_link_rate;
+ /* Some LTTPR devices do not report valid DPCD revisions; if so, do not take their link caps into consideration. */
+ if (link->dpcd_caps.lttpr_caps.revision.raw >= DPCD_REV_14) {
+ if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+ lttpr_max_link_rate = get_lttpr_max_link_rate(link);
- if (!link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
- is_uhbr13_5_supported = false;
+ if (lttpr_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = lttpr_max_link_rate;
+
+ if (!link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+ is_uhbr13_5_supported = false;
+ }
DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
__func__,
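The new gate compares the raw LTTPR DPCD revision byte, which encodes major.minor as two nibbles (0x14 == rev 1.4), so deciding whether to trust the advertised caps reduces to a plain byte comparison:

#include <stdbool.h>
#include <stdint.h>

#define DPCD_REV_14 0x14        /* high nibble = major 1, low nibble = minor 4 */

static bool lttpr_caps_trustworthy(uint8_t lttpr_dpcd_rev_raw)
{
        /* devices reporting an invalid or too-old revision have their
         * advertised lane count and link rate ignored (see hunk above) */
        return lttpr_dpcd_rev_raw >= DPCD_REV_14;
}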
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 5a0f574056d4..988999c44475 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1663,8 +1663,7 @@ bool perform_link_training_with_retries(
if (status == LINK_TRAINING_ABORT) {
enum dc_connection_type type = dc_connection_none;
- link_detect_connection_type(link, &type);
- if (type == dc_connection_none) {
+ if (link_detect_connection_type(link, &type) && type == dc_connection_none) {
DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index e3e70c1db040..369a13244e5e 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -562,7 +562,8 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_FORMAT;\
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
- type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
+ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;\
+ type OPTC_DOUBLE_BUFFER_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 6c837409df42..00094f0e8470 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -297,6 +297,18 @@ static void optc32_set_drr(
optc32_setup_manual_trigger(optc);
}
+bool optc32_get_double_buffer_pending(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t update_pending = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_DOUBLE_BUFFER_PENDING,
+ &update_pending);
+
+ return (update_pending == 1);
+}
+
static struct timing_generator_funcs dcn32_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -361,6 +373,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .get_double_buffer_pending = optc32_get_double_buffer_pending,
};
void dcn32_timing_generator_init(struct optc *optc1)
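optc32_get_double_buffer_pending() uses REG_GET(), which boils down to a masked shift of the register value. A stand-alone equivalent, with a hypothetical bit position since the field's real mask and shift live in the generated sh_mask headers:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_DOUBLE_BUFFER_PENDING_MASK  0x00000100u  /* hypothetical bit */
#define DEMO_DOUBLE_BUFFER_PENDING_SHIFT 8            /* hypothetical */

static bool double_buffer_pending(uint32_t optc_input_global_control)
{
        return ((optc_input_global_control & DEMO_DOUBLE_BUFFER_PENDING_MASK)
                        >> DEMO_DOUBLE_BUFFER_PENDING_SHIFT) == 1;
}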
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 0c2c14695561..665d7c52f67c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -116,6 +116,7 @@
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
@@ -184,5 +185,6 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
+bool optc32_get_double_buffer_pending(struct timing_generator *optc);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index fd1c8b45c40e..9f5c2efa7560 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -459,6 +459,7 @@ static struct timing_generator_funcs dcn401_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .get_double_buffer_pending = optc32_get_double_buffer_pending,
};
void dcn401_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
index 1671fdd5061c..3114ecef332a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
@@ -94,6 +94,7 @@
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index e783afbbb397..5e7cfa8e8ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2200,10 +2200,11 @@ bool dcn20_get_dcc_compression_cap(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output)
{
- return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
- dc->res_pool->hubbub,
- input,
- output);
+ if (dc->res_pool->hubbub->funcs->get_dcc_compression_cap)
+ return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
+ dc->res_pool->hubbub, input, output);
+
+ return false;
}
static void dcn20_destroy_resource_pool(struct resource_pool **pool)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 0094ef223c5d..1ce0f9ecff9c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -721,7 +721,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
.disable_optc_power_gate = true, /*should be the same as the two above*/
- .disable_hpo_power_gate = false, /*dmubfw force domain25 on*/
+ .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/
.disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
@@ -2152,8 +2152,9 @@ static bool dcn35_resource_construct(
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
dc->dml2_options.max_segments_per_hubp = 24;
-
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+ dc->dml2_options.max_frac_urgent_for_min_fclk_x1000 = 900;
+ dc->dml2_options.min_fclk_for_urgent_workaround_khz = 400 * 1000;
if (dc->config.sdpif_request_limit_words_per_umc == 0)
dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 74fb21b88f12..4e27d2cee9fb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1617,6 +1617,14 @@ bool dcn401_validate_bandwidth(struct dc *dc,
return out;
}
+void dcn401_prepare_mcache_programming(struct dc *dc,
+ struct dc_state *context)
+{
+ if (dc->debug.using_dml21)
+ dml2_prepare_mcache_programming(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2);
+}
+
static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_stream_state *stream = pipe_ctx->stream;
@@ -1699,6 +1707,7 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.patch_unknown_plane_state = dcn401_patch_unknown_plane_state,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
+ .prepare_mcache_programming = dcn401_prepare_mcache_programming,
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
};
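prepare_mcache_programming is a new optional hook in struct resource_funcs, populated only for DCN4.01 here; like the other optional resource funcs, callers would be expected to null-check it first. Illustrative call-site sketch, not from this patch:

static void maybe_prepare_mcache_programming(struct dc *dc,
                struct dc_state *context)
{
        /* hook is NULL on pools that have no mcache hardware */
        if (dc->res_pool->funcs->prepare_mcache_programming)
                dc->res_pool->funcs->prepare_mcache_programming(dc, context);
}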
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index c04c8b8f2114..26efeada4f41 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -26,6 +26,8 @@ bool dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
+void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
+
/* Following are definitions for run time init of reg offsets */
/* HUBP */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 182e7532dda8..1e495e884484 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -432,18 +432,18 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
goto out;
}
- if (status == MOD_HDCP_STATUS_SUCCESS)
- mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
- &input->bstatus_read, &status,
- hdcp, "bstatus_read");
- if (status == MOD_HDCP_STATUS_SUCCESS)
- mod_hdcp_execute_and_set(check_link_integrity_dp,
- &input->link_integrity_check, &status,
- hdcp, "link_integrity_check");
- if (status == MOD_HDCP_STATUS_SUCCESS)
- mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
- &input->reauth_request_check, &status,
- hdcp, "reauth_request_check");
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integrity_check, &status,
+ hdcp, "link_integrity_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
out:
return status;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 1b2df97226a3..7ecf76aea950 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -161,8 +161,7 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
return MOD_HDCP_STATUS_DDC_FAILURE;
if (is_dp_hdcp(hdcp)) {
- int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
- sizeof(hdcp_dpcd_addrs[0]);
+ int num_dpcd_addrs = ARRAY_SIZE(hdcp_dpcd_addrs);
if (msg_id >= num_dpcd_addrs)
return MOD_HDCP_STATUS_DDC_FAILURE;
@@ -180,8 +179,7 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
data_offset += cur_size;
}
} else {
- int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
- sizeof(hdcp_i2c_offsets[0]);
+ int num_i2c_offsets = ARRAY_SIZE(hdcp_i2c_offsets);
if (msg_id >= num_i2c_offsets)
return MOD_HDCP_STATUS_DDC_FAILURE;
@@ -234,8 +232,7 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
return MOD_HDCP_STATUS_DDC_FAILURE;
if (is_dp_hdcp(hdcp)) {
- int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
- sizeof(hdcp_dpcd_addrs[0]);
+ int num_dpcd_addrs = ARRAY_SIZE(hdcp_dpcd_addrs);
if (msg_id >= num_dpcd_addrs)
return MOD_HDCP_STATUS_DDC_FAILURE;
@@ -254,8 +251,7 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
data_offset += cur_size;
}
} else {
- int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
- sizeof(hdcp_i2c_offsets[0]);
+ int num_i2c_offsets = ARRAY_SIZE(hdcp_i2c_offsets);
if (msg_id >= num_i2c_offsets)
return MOD_HDCP_STATUS_DDC_FAILURE;
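ARRAY_SIZE() is the kernel's standard element-count macro, replacing the open-coded sizeof divisions above. Its simplified form is below; the in-tree version additionally uses __must_be_array() to reject pointers at compile time. The sample table contents are demo values only:

#include <stdint.h>

#define ARRAY_SIZE_SIMPLE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const uint32_t demo_dpcd_addrs[] = { 0x68000, 0x68005, 0x6800b };
/* ARRAY_SIZE_SIMPLE(demo_dpcd_addrs) == 3 */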
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 7c9805705fd3..8c137d7c032e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -513,9 +513,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
- if (!display)
- return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
-
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 36ee9d3d6d9c..f5b725f10a7c 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -86,6 +86,7 @@ enum amd_apu_flags {
* @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine
* @AMD_IP_BLOCK_TYPE_VPE: Video Processing Engine
* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Scheduler for Multimedia
+* @AMD_IP_BLOCK_TYPE_ISP: Image Signal Processor
* @AMD_IP_BLOCK_TYPE_NUM: Total number of IP block types
*/
enum amd_ip_block_type {
@@ -105,6 +106,7 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_JPEG,
AMD_IP_BLOCK_TYPE_VPE,
AMD_IP_BLOCK_TYPE_UMSCH_MM,
+ AMD_IP_BLOCK_TYPE_ISP,
AMD_IP_BLOCK_TYPE_NUM,
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
index 5dabf0abccce..15e5a65cf492 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
@@ -1777,6 +1777,8 @@
#define regDCHUBBUB_SDPIF_MEM_PWR_CTRL_BASE_IDX 2
#define regDCHUBBUB_SDPIF_MEM_PWR_STATUS 0x0487
#define regDCHUBBUB_SDPIF_MEM_PWR_STATUS_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL 0x0488
+#define regDCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL_BASE_IDX 2
// addressBlock: dcn_dcec_dchubbubl_hubbub_ret_path_dispdec
@@ -2089,6 +2091,8 @@
#define regHUBP0_DCSURF_TILING_CONFIG_BASE_IDX 2
#define regHUBP0_DCSURF_PRI_VIEWPORT_START 0x05e9
#define regHUBP0_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE 0x05ea
+#define regHUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE_BASE_IDX 2
#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x05eb
#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define regHUBP0_DCSURF_PRI_VIEWPORT_START_C 0x05ec
@@ -2121,6 +2125,8 @@
#define regHUBP0_DCHUBP_MALL_SUB_VP1_BASE_IDX 2
#define regHUBP0_DCHUBP_MALL_SUB_VP2 0x05fa
#define regHUBP0_DCHUBP_MALL_SUB_VP2_BASE_IDX 2
+#define regHUBP0_DCHUBP_MCACHEID_CONFIG 0x05fb
+#define regHUBP0_DCHUBP_MCACHEID_CONFIG_BASE_IDX 2
#define regHUBP0_HUBPREQ_DEBUG_DB 0x05fc
#define regHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
#define regHUBP0_HUBPREQ_DEBUG 0x05fd
@@ -2378,6 +2384,8 @@
#define regHUBP1_DCSURF_TILING_CONFIG_BASE_IDX 2
#define regHUBP1_DCSURF_PRI_VIEWPORT_START 0x06c5
#define regHUBP1_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP1_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE 0x06c6
+#define regHUBP1_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE_BASE_IDX 2
#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION 0x06c7
#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define regHUBP1_DCSURF_PRI_VIEWPORT_START_C 0x06c8
@@ -2410,6 +2418,8 @@
#define regHUBP1_DCHUBP_MALL_SUB_VP1_BASE_IDX 2
#define regHUBP1_DCHUBP_MALL_SUB_VP2 0x06d6
#define regHUBP1_DCHUBP_MALL_SUB_VP2_BASE_IDX 2
+#define regHUBP1_DCHUBP_MCACHEID_CONFIG 0x06d7
+#define regHUBP1_DCHUBP_MCACHEID_CONFIG_BASE_IDX 2
#define regHUBP1_HUBPREQ_DEBUG_DB 0x06d8
#define regHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
#define regHUBP1_HUBPREQ_DEBUG 0x06d9
@@ -2667,6 +2677,8 @@
#define regHUBP2_DCSURF_TILING_CONFIG_BASE_IDX 2
#define regHUBP2_DCSURF_PRI_VIEWPORT_START 0x07a1
#define regHUBP2_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP2_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE 0x07a2
+#define regHUBP2_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE_BASE_IDX 2
#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION 0x07a3
#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define regHUBP2_DCSURF_PRI_VIEWPORT_START_C 0x07a4
@@ -2699,6 +2711,8 @@
#define regHUBP2_DCHUBP_MALL_SUB_VP1_BASE_IDX 2
#define regHUBP2_DCHUBP_MALL_SUB_VP2 0x07b2
#define regHUBP2_DCHUBP_MALL_SUB_VP2_BASE_IDX 2
+#define regHUBP2_DCHUBP_MCACHEID_CONFIG 0x07b3
+#define regHUBP2_DCHUBP_MCACHEID_CONFIG_BASE_IDX 2
#define regHUBP2_HUBPREQ_DEBUG_DB 0x07b4
#define regHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
#define regHUBP2_HUBPREQ_DEBUG 0x07b5
@@ -2957,6 +2971,8 @@
#define regHUBP3_DCSURF_TILING_CONFIG_BASE_IDX 2
#define regHUBP3_DCSURF_PRI_VIEWPORT_START 0x087d
#define regHUBP3_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP3_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE 0x087e
+#define regHUBP3_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE_BASE_IDX 2
#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION 0x087f
#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define regHUBP3_DCSURF_PRI_VIEWPORT_START_C 0x0880
@@ -2989,6 +3005,8 @@
#define regHUBP3_DCHUBP_MALL_SUB_VP1_BASE_IDX 2
#define regHUBP3_DCHUBP_MALL_SUB_VP2 0x088e
#define regHUBP3_DCHUBP_MALL_SUB_VP2_BASE_IDX 2
+#define regHUBP3_DCHUBP_MCACHEID_CONFIG 0x088f
+#define regHUBP3_DCHUBP_MCACHEID_CONFIG_BASE_IDX 2
#define regHUBP3_HUBPREQ_DEBUG_DB 0x0890
#define regHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
#define regHUBP3_HUBPREQ_DEBUG 0x0891
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
index 0c68f5d818bb..f42a276499cd 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
@@ -6430,6 +6430,28 @@
//DCHUBBUB_SDPIF_MEM_PWR_STATUS
#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE__SHIFT 0x0
#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE_MASK 0x00000003L
+//DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_FLIP_AWAY_MISSING_PIPE0__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_FLIP_AWAY_MISSING_PIPE1__SHIFT 0x1
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_FLIP_AWAY_MISSING_PIPE2__SHIFT 0x2
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_FLIP_AWAY_MISSING_PIPE3__SHIFT 0x3
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_CLEAR_PIPE0__SHIFT 0xc
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_CLEAR_PIPE1__SHIFT 0xd
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_CLEAR_PIPE2__SHIFT 0xe
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_CLEAR_PIPE3__SHIFT 0xf
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_ENABLE__SHIFT 0x1c
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_VREADY_MODE__SHIFT 0x1f
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_FLIP_AWAY_MISSING_PIPE0_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_FLIP_AWAY_MISSING_PIPE1_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_FLIP_AWAY_MISSING_PIPE2_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_FLIP_AWAY_MISSING_PIPE3_MASK 0x00000008L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_CLEAR_PIPE0_MASK 0x00001000L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_CLEAR_PIPE1_MASK 0x00002000L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_CLEAR_PIPE2_MASK 0x00004000L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_CLEAR_PIPE3_MASK 0x00008000L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_ERR_ENABLE_MASK 0x10000000L
+#define DCHUBBUB_SDPIF_MCACHE_INVALIDATION_CTL__DCHUBBUB_SDPIF_MCACHEID_INV_VREADY_MODE_MASK 0x80000000L
+
// addressBlock: dcn_dcec_dchubbubl_hubbub_ret_path_dispdec
@@ -7084,6 +7106,11 @@
#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x0000FFFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0xFFFF0000L
+//HUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE
+#define HUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE__SHIFT 0x0
+#define HUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_C__SHIFT 0x10
+#define HUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_MASK 0x0000FFFFL
+#define HUBP0_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_C_MASK 0xFFFF0000L
//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
@@ -7244,6 +7271,23 @@
#define HUBP0_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_NEXT_S1__SHIFT 0xc
#define HUBP0_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_CURR_S1_MASK 0x00000FFFL
#define HUBP0_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_NEXT_S1_MASK 0x00FFF000L
+//HUBP0_DCHUBP_MCACHEID_CONFIG
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P0__SHIFT 0x0
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P0__SHIFT 0x4
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P1__SHIFT 0x8
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P1__SHIFT 0xc
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P0__SHIFT 0x10
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P0__SHIFT 0x14
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P1__SHIFT 0x18
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P1__SHIFT 0x1c
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P0_MASK 0x0000000FL
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P0_MASK 0x000000F0L
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P1_MASK 0x00000F00L
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P1_MASK 0x0000F000L
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P0_MASK 0x000F0000L
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P0_MASK 0x00F00000L
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P1_MASK 0x0F000000L
+#define HUBP0_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P1_MASK 0xF0000000L
//HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK
#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
@@ -8013,6 +8057,11 @@
#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x0000FFFFL
#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0xFFFF0000L
+//HUBP1_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE
+#define HUBP1_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE__SHIFT 0x0
+#define HUBP1_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_C__SHIFT 0x10
+#define HUBP1_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_MASK 0x0000FFFFL
+#define HUBP1_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_C_MASK 0xFFFF0000L
//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION
#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
@@ -8173,6 +8222,23 @@
#define HUBP1_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_NEXT_S1__SHIFT 0xc
#define HUBP1_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_CURR_S1_MASK 0x00000FFFL
#define HUBP1_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_NEXT_S1_MASK 0x00FFF000L
+//HUBP1_DCHUBP_MCACHEID_CONFIG
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P0__SHIFT 0x0
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P0__SHIFT 0x4
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P1__SHIFT 0x8
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P1__SHIFT 0xc
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P0__SHIFT 0x10
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P0__SHIFT 0x14
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P1__SHIFT 0x18
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P1__SHIFT 0x1c
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P0_MASK 0x0000000FL
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P0_MASK 0x000000F0L
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P1_MASK 0x00000F00L
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P1_MASK 0x0000F000L
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P0_MASK 0x000F0000L
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P0_MASK 0x00F00000L
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P1_MASK 0x0F000000L
+#define HUBP1_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P1_MASK 0xF0000000L
//HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK
#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
@@ -8942,6 +9008,11 @@
#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x0000FFFFL
#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0xFFFF0000L
+//HUBP2_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE
+#define HUBP2_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE__SHIFT 0x0
+#define HUBP2_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_C__SHIFT 0x10
+#define HUBP2_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_MASK 0x0000FFFFL
+#define HUBP2_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_C_MASK 0xFFFF0000L
//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION
#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
@@ -9102,6 +9173,23 @@
#define HUBP2_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_NEXT_S1__SHIFT 0xc
#define HUBP2_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_CURR_S1_MASK 0x00000FFFL
#define HUBP2_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_NEXT_S1_MASK 0x00FFF000L
+//HUBP2_DCHUBP_MCACHEID_CONFIG
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P0__SHIFT 0x0
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P0__SHIFT 0x4
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P1__SHIFT 0x8
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P1__SHIFT 0xc
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P0__SHIFT 0x10
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P0__SHIFT 0x14
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P1__SHIFT 0x18
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P1__SHIFT 0x1c
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P0_MASK 0x0000000FL
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P0_MASK 0x000000F0L
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P1_MASK 0x00000F00L
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P1_MASK 0x0000F000L
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P0_MASK 0x000F0000L
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P0_MASK 0x00F00000L
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P1_MASK 0x0F000000L
+#define HUBP2_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P1_MASK 0xF0000000L
//HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK
#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
@@ -9871,6 +9959,11 @@
#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x0000FFFFL
#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0xFFFF0000L
+//HUBP3_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE
+#define HUBP3_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE__SHIFT 0x0
+#define HUBP3_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_C__SHIFT 0x10
+#define HUBP3_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_MASK 0x0000FFFFL
+#define HUBP3_DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE__VIEWPORT_MCACHE_SPLIT_COORDINATE_C_MASK 0xFFFF0000L
//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION
#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
@@ -10031,6 +10124,23 @@
#define HUBP3_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_NEXT_S1__SHIFT 0xc
#define HUBP3_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_CURR_S1_MASK 0x00000FFFL
#define HUBP3_DCHUBP_MALL_SUB_VP2__SUB_VP_HEIGHT_NEXT_S1_MASK 0x00FFF000L
+//HUBP3_DCHUBP_MCACHEID_CONFIG
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P0__SHIFT 0x0
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P0__SHIFT 0x4
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P1__SHIFT 0x8
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P1__SHIFT 0xc
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P0__SHIFT 0x10
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P0__SHIFT 0x14
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P1__SHIFT 0x18
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P1__SHIFT 0x1c
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P0_MASK 0x0000000FL
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P0_MASK 0x000000F0L
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_1H_P1_MASK 0x00000F00L
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_REG_READ_2H_P1_MASK 0x0000F000L
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P0_MASK 0x000F0000L
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P0_MASK 0x00F00000L
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_1H_P1_MASK 0x0F000000L
+#define HUBP3_DCHUBP_MCACHEID_CONFIG__MCACHEID_MALL_PREF_2H_P1_MASK 0xF0000000L
//HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK
#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
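Each register added in the offset header also gains a matching __SHIFT/_MASK pair above, which is what field accessors build on: clear the field with the mask, then OR in the shifted value. A self-contained C illustration (plain stdint code, not amdgpu API; the helper name is made up, and the two local defines copy the HUBP0_DCHUBP_MCACHEID_CONFIG values shown above so the snippet compiles standalone):

    #include <stdint.h>

    #define MCACHEID_REG_READ_1H_P0__SHIFT 0x0
    #define MCACHEID_REG_READ_1H_P0_MASK   0x0000000FL

    /* Pack a 4-bit mcache ID into the REG_READ_1H_P0 field of a cached
     * register value, leaving the other seven nibble fields untouched. */
    static uint32_t pack_mcacheid_reg_read_1h_p0(uint32_t reg, uint32_t id)
    {
        reg &= ~(uint32_t)MCACHEID_REG_READ_1H_P0_MASK;
        reg |= (id << MCACHEID_REG_READ_1H_P0__SHIFT) &
               MCACHEID_REG_READ_1H_P0_MASK;
        return reg;
    }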
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/isp/irqsrcs_isp_4_1.h b/drivers/gpu/drm/amd/include/ivsrcid/isp/irqsrcs_isp_4_1.h
new file mode 100644
index 000000000000..2ecdfd4f1b03
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/isp/irqsrcs_isp_4_1.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __IRQSRCS_ISP_4_1_H__
+#define __IRQSRCS_ISP_4_1_H__
+
+
+#define ISP_4_1__SRCID__ISP_SEMA_WAIT_FAIL_TIMEOUT 0x12 // Semaphore wait fail timeout
+#define ISP_4_1__SRCID__ISP_SEMA_WAIT_INCOMPLETE_TIMEOUT 0x13 // Semaphore wait incomplete timeout
+#define ISP_4_1__SRCID__ISP_SEMA_SIGNAL_INCOMPLETE_TIMEOUT 0x14 // Semaphore signal incomplete timeout
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE5_CHANGED 0x15 // Ringbuffer base5 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT5 0x16 // Ringbuffer write point 5 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE6_CHANGED 0x17 // Ringbuffer base6 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT6 0x18 // Ringbuffer write point 6 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE7_CHANGED 0x19 // Ringbuffer base7 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT7 0x1A // Ringbuffer write point 7 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE8_CHANGED 0x1B // Ringbuffer base8 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT8 0x1C // Ringbuffer write point 8 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE9_CHANGED 0x00 // Ringbuffer base9 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9 0x01 // Ringbuffer write point 9 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE10_CHANGED 0x02 // Ringbuffer base10 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10 0x03 // Ringbuffer write point 10 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE11_CHANGED 0x04 // Ringbuffer base11 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT11 0x05 // Ringbuffer write point 11 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE12_CHANGED 0x06 // Ringbuffer base12 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT12 0x07 // Ringbuffer write point 12 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE13_CHANGED 0x08 // Ringbuffer base13 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT13 0x09 // Ringbuffer write point 13 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE14_CHANGED 0x0A // Ringbuffer base14 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT14 0x0B // Ringbuffer write point 14 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE15_CHANGED 0x0C // Ringbuffer base15 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT15 0x0D // Ringbuffer write point 15 changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_BASE16_CHANGED 0x0E // Ringbuffer base16 address changed
+#define ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16 0x0F // Ringbuffer write point 16 changed
+#define ISP_4_1__SRCID__ISP_MIPI0 0x29 // MIPI0 interrupt
+#define ISP_4_1__SRCID__ISP_MIPI1 0x2A // MIPI1 interrupt
+#define ISP_4_1__SRCID__ISP_I2C0 0x2B // I2C0 PAD interrupt
+#define ISP_4_1__SRCID__ISP_I2C1 0x2C // I2C1 PAD interrupt
+#define ISP_4_1__SRCID__ISP_FLASH0 0x2D // Flash0 interrupt
+#define ISP_4_1__SRCID__ISP_FLASH1 0x2E // Flash1 interrupt
+#define ISP_4_1__SRCID__ISP_DEBUG 0x2F // Debug information
+
+#endif
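One quirk worth noting in the new header: the ringbuffer source IDs are not monotonic — rings 5..8 sit at 0x15..0x1C while rings 9..16 wrap back to 0x00..0x0F. A small self-contained decoder that recovers the ring index from a write-pointer src_id (an illustrative helper, not part of the patch; the case values are copied from the defines above):

    #include <stdint.h>

    static int isp_4_1_ring_from_wpt_srcid(uint32_t src_id)
    {
        switch (src_id) {
        case 0x16: return 5;   /* ISP_4_1__SRCID__ISP_RINGBUFFER_WPT5  */
        case 0x18: return 6;   /* ..._WPT6  */
        case 0x1A: return 7;   /* ..._WPT7  */
        case 0x1C: return 8;   /* ..._WPT8  */
        case 0x01: return 9;   /* ..._WPT9  */
        case 0x03: return 10;  /* ..._WPT10 */
        case 0x05: return 11;  /* ..._WPT11 */
        case 0x07: return 12;  /* ..._WPT12 */
        case 0x09: return 13;  /* ..._WPT13 */
        case 0x0B: return 14;  /* ..._WPT14 */
        case 0x0D: return 15;  /* ..._WPT15 */
        case 0x0F: return 16;  /* ..._WPT16 */
        default:   return -1;  /* not a write-pointer interrupt */
        }
    }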
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8e694b576d1c..00384b381289 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -237,7 +237,6 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
/*
@@ -253,7 +252,7 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
return 0;
ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
- if (!ret && !adev->enable_jpeg_test)
+ if (!ret)
atomic_set(&power_gate->vcn_gated, !enable);
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 7179cdacf156..98ea58d792ca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1869,34 +1869,6 @@ static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
return ret;
}
-static int smu_v14_0_2_set_vcn_enable(struct smu_context *smu,
- bool enable)
-{
- struct amdgpu_device *adev = smu->adev;
- struct smu_power_gate *power_gate = &smu->smu_power.power_gate;
- int i, ret = 0;
-
- if (!adev->enable_jpeg_test)
- return smu_v14_0_set_vcn_enable(smu, enable);
-
- if (!atomic_read(&power_gate->vcn_gated) || !enable)
- return 0;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
- i << 16U, NULL);
- if (ret)
- return ret;
- }
-
- atomic_set(&power_gate->vcn_gated, 0);
-
- return ret;
-}
-
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
@@ -1919,7 +1891,7 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.system_features_control = smu_v14_0_system_features_control,
.set_allowed_mask = smu_v14_0_set_allowed_mask,
.get_enabled_mask = smu_cmn_get_enabled_mask,
- .dpm_set_vcn_enable = smu_v14_0_2_set_vcn_enable,
+ .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
.dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
.get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq,
.get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
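Taken together, the two SMU hunks retire the JPEG-test special case: the smu_v14_0_2 table now points .dpm_set_vcn_enable at the common smu_v14_0_set_vcn_enable, and the generic wrapper updates the gate flag on every successful toggle. Paraphrased post-patch flow (a sketch of the diff above, not verbatim kernel code):

    /* smu_dpm_set_vcn_enable(), after this patch: */
    ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
    if (!ret)                            /* no enable_jpeg_test carve-out */
        atomic_set(&power_gate->vcn_gated, !enable);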