author     Dave Airlie <airlied@redhat.com>  2024-06-27 10:18:49 +0300
committer  Dave Airlie <airlied@redhat.com>  2024-06-27 10:18:49 +0300
commit     365aa9f573995b46ca14a24165d85e31160e47b9 (patch)
tree       392d9656fc902e8198ee89efe278c78376478f8a
parent     541b1b0a8fc235bca355921eb7f3f59a8efa3e9a (diff)
parent     1ecef5589320fd56af599b624d59c355d162ac7b (diff)
download   linux-365aa9f573995b46ca14a24165d85e31160e47b9.tar.xz
Merge tag 'amd-drm-next-6.11-2024-06-22' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.11-2024-06-22:

amdgpu:
- HPD fixes
- PSR fixes
- DCC updates
- DCN 4.0.1 fixes
- FAMS fixes
- Misc code cleanups
- SR-IOV fixes
- GPUVM TLB flush cleanups
- Make VCN less verbose
- ACPI backlight fixes
- MES fixes
- Firmware loading cleanups
- Replay fixes
- LTTPR fixes
- Trap handler fixes
- Cursor and overlay fixes
- Primary plane zpos fixes
- DML 2.1 fixes
- RAS updates
- USB4 fixes
- MALL fixes
- Reserved VMID fix
- Silence UBSAN warnings

amdkfd:
- Misc code cleanups

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240622152523.2267072-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 134
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v12_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0_0_pkt_open.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 38
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 630
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 244
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/smu13_driver_if.h | 108
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 103
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 104
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 145
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/Makefile | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/Makefile | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c) | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c) | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h) | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_link_encoder.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_link_encoder.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_stream_encoder.c) | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_stream_encoder.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/Makefile | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/Makefile | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 159
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 152
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/Makefile | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn401/dcn401_mpc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn401/dcn401_mpc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/Makefile | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/pg/Makefile | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c) | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 182
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h | 36
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 38
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 18
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 73
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 2
271 files changed, 3226 insertions(+), 2015 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 35faa286bb1c..7dab4768cee6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1441,6 +1441,7 @@ u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
u32 reg);
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
u32 reg, u32 v);
+struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
@@ -1567,6 +1568,7 @@ static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
enum amdgpu_ss ss_state) { return 0; }
+static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 01d50ad603d3..7945173321a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -222,9 +222,9 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
INIT_LIST_HEAD(&bank_error->node);
memcpy(&bank_error->info, info, sizeof(*info));
- spin_lock(&aerr->lock);
+ mutex_lock(&aerr->lock);
list_add_tail(&bank_error->node, &aerr->list);
- spin_unlock(&aerr->lock);
+ mutex_unlock(&aerr->lock);
return bank_error;
}
@@ -235,7 +235,7 @@ static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca
struct aca_bank_info *tmp_info;
bool found = false;
- spin_lock(&aerr->lock);
+ mutex_lock(&aerr->lock);
list_for_each_entry(bank_error, &aerr->list, node) {
tmp_info = &bank_error->info;
if (tmp_info->socket_id == info->socket_id &&
@@ -246,7 +246,7 @@ static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca
}
out_unlock:
- spin_unlock(&aerr->lock);
+ mutex_unlock(&aerr->lock);
return found ? bank_error : NULL;
}
@@ -474,7 +474,7 @@ static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type
struct aca_error *aerr = &error_cache->errors[type];
struct aca_bank_error *bank_error, *tmp;
- spin_lock(&aerr->lock);
+ mutex_lock(&aerr->lock);
if (list_empty(&aerr->list))
goto out_unlock;
@@ -485,7 +485,7 @@ static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type
}
out_unlock:
- spin_unlock(&aerr->lock);
+ mutex_unlock(&aerr->lock);
return 0;
}
@@ -542,7 +542,7 @@ int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *han
static void aca_error_init(struct aca_error *aerr, enum aca_error_type type)
{
- spin_lock_init(&aerr->lock);
+ mutex_init(&aerr->lock);
INIT_LIST_HEAD(&aerr->list);
aerr->type = type;
aerr->nr_errors = 0;
@@ -561,10 +561,11 @@ static void aca_error_fini(struct aca_error *aerr)
{
struct aca_bank_error *bank_error, *tmp;
- spin_lock(&aerr->lock);
+ mutex_lock(&aerr->lock);
list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
aca_bank_error_remove(aerr, bank_error);
- spin_unlock(&aerr->lock);
+
+ mutex_destroy(&aerr->lock);
}
static void aca_fini_error_cache(struct aca_handle *handle)
@@ -712,6 +713,15 @@ void amdgpu_aca_fini(struct amdgpu_device *adev)
atomic_set(&aca->ue_update_flag, 0);
}
+int amdgpu_aca_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+
+ atomic_set(&aca->ue_update_flag, 0);
+
+ return 0;
+}
+
void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs)
{
struct amdgpu_aca *aca = &adev->aca;
@@ -885,9 +895,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_se
void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
- if (!root ||
- (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6) &&
- adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 14)))
+ if (!root)
return;
debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 4327ce1ceacf..5ef6b745f222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -25,7 +25,6 @@
#define __AMDGPU_ACA_H__
#include <linux/list.h>
-#include <linux/spinlock.h>
struct ras_err_data;
struct ras_query_context;
@@ -134,7 +133,7 @@ struct aca_bank_error {
struct aca_error {
struct list_head list;
- spinlock_t lock;
+ struct mutex lock;
enum aca_error_type type;
int nr_errors;
};
@@ -192,6 +191,7 @@ struct aca_info {
int amdgpu_aca_init(struct amdgpu_device *adev);
void amdgpu_aca_fini(struct amdgpu_device *adev);
+int amdgpu_aca_reset(struct amdgpu_device *adev);
void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs);
bool amdgpu_aca_is_enabled(struct amdgpu_device *adev);
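The amdgpu_aca.c and amdgpu_aca.h hunks above convert the per-type ACA error lock from a spinlock to a mutex (with a matching mutex_destroy() in the fini path); unlike a spinlock, a mutex lets the holder sleep, e.g. to allocate memory while logging a bank error. A minimal userspace sketch of the resulting pattern, with a pthread mutex standing in for the kernel mutex and all names purely illustrative:

/* A sleepable lock guarding a list whose critical section may block
 * (usleep() stands in for allocation or firmware I/O); a spinlock
 * would forbid sleeping here. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct bank_error {
	int socket_id;
	struct bank_error *next;
};

static pthread_mutex_t aerr_lock = PTHREAD_MUTEX_INITIALIZER;
static struct bank_error *aerr_list;

static void log_bank_error(int socket_id)
{
	struct bank_error *e = malloc(sizeof(*e));

	e->socket_id = socket_id;
	pthread_mutex_lock(&aerr_lock);
	usleep(1000);			/* may sleep while the lock is held */
	e->next = aerr_list;
	aerr_list = e;
	pthread_mutex_unlock(&aerr_lock);
}

int main(void)
{
	log_bank_error(0);
	log_bank_error(1);
	for (struct bank_error *e = aerr_list; e; e = e->next)
		printf("socket %d\n", e->socket_id);
	return 0;
}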
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7099ff9cf8c5..f85ace0384d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -383,6 +383,8 @@ static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
characteristics.min_input_signal;
atif->backlight_caps.max_input_signal =
characteristics.max_input_signal;
+ atif->backlight_caps.ac_level = characteristics.ac_level;
+ atif->backlight_caps.dc_level = characteristics.dc_level;
out:
kfree(info);
return err;
@@ -1268,6 +1270,8 @@ void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
caps->caps_valid = atif->backlight_caps.caps_valid;
caps->min_input_signal = atif->backlight_caps.min_input_signal;
caps->max_input_signal = atif->backlight_caps.max_input_signal;
+ caps->ac_level = atif->backlight_caps.ac_level;
+ caps->dc_level = atif->backlight_caps.dc_level;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4096cb3e937e..2de36883fc9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2350,7 +2350,6 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[40];
int err;
const struct gpu_info_firmware_header_v1_0 *hdr;
@@ -2384,12 +2383,12 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
break;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
+ "amdgpu/%s_gpu_info.bin", chip_name);
if (err) {
dev_err(adev->dev,
- "Failed to get gpu_info firmware \"%s\"\n",
- fw_name);
+ "Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
+ chip_name);
goto out;
}
@@ -5070,6 +5069,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
+ amdgpu_virt_ready_to_reset(adev);
+ amdgpu_virt_wait_reset(adev);
clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
r = amdgpu_virt_request_full_gpu(adev, true);
} else {
@@ -6275,20 +6276,11 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_reset_context reset_context;
u32 memsize;
struct list_head device_list;
- struct amdgpu_hive_info *hive;
- int hive_ras_recovery = 0;
- struct amdgpu_ras *ras;
/* PCI error slot reset should be skipped During RAS recovery */
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive) {
- hive_ras_recovery = atomic_read(&hive->ras_recovery);
- amdgpu_put_xgmi_hive(hive);
- }
- ras = amdgpu_ras_get_context(adev);
if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
+ amdgpu_ras_in_recovery(adev))
return PCI_ERS_RESULT_RECOVERED;
DRM_INFO("PCI error: slot reset callback!!\n");
@@ -6531,6 +6523,22 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_get_gang - return a reference to the current gang
+ * @adev: amdgpu_device pointer
+ *
+ * Returns: A new reference to the current gang leader.
+ */
+struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
+{
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&adev->gang_submit);
+ rcu_read_unlock();
+ return fence;
+}
+
+/**
* amdgpu_device_switch_gang - switch to a new gang
* @adev: amdgpu_device pointer
* @gang: the gang to switch to
@@ -6546,10 +6554,7 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
do {
dma_fence_put(old);
- rcu_read_lock();
- old = dma_fence_get_rcu_safe(&adev->gang_submit);
- rcu_read_unlock();
-
+ old = amdgpu_device_get_gang(adev);
if (old == gang)
break;
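The new amdgpu_device_get_gang() helper above wraps the dma_fence_get_rcu_safe() pattern: take a reference to the object a shared slot currently points at, then confirm the slot still points at it, retrying if a writer raced in between. A simplified, runnable userspace sketch of that get-then-recheck idea (it deliberately omits the deferred freeing that RCU also provides; names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct fence {
	atomic_int refcount;
};

static _Atomic(struct fence *) gang_submit;

/* Take a reference, then verify the shared slot is unchanged;
 * otherwise drop the reference and retry. */
static struct fence *get_gang(void)
{
	struct fence *f;

	for (;;) {
		f = atomic_load(&gang_submit);
		if (!f)
			return NULL;
		atomic_fetch_add(&f->refcount, 1);
		if (atomic_load(&gang_submit) == f)
			return f;	/* still current: reference is valid */
		atomic_fetch_sub(&f->refcount, 1);	/* raced: retry */
	}
}

int main(void)
{
	struct fence leader = { 1 };

	atomic_store(&gang_submit, &leader);
	struct fence *f = get_gang();
	printf("refcount now %d\n", atomic_load(&f->refcount));
	return 0;
}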
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index c71356cb393d..0cb8aea93a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -260,19 +260,21 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
u32 msg;
int i, ret = 0;
- /* It can take up to a second for IFWI init to complete on some dGPUs,
- * but generally it should be in the 60-100ms range. Normally this starts
- * as soon as the device gets power so by the time the OS loads this has long
- * completed. However, when a card is hotplugged via e.g., USB4, we need to
- * wait for this to complete. Once the C2PMSG is updated, we can
- * continue.
- */
+ if (!amdgpu_sriov_vf(adev)) {
+ /* It can take up to a second for IFWI init to complete on some dGPUs,
+ * but generally it should be in the 60-100ms range. Normally this starts
+ * as soon as the device gets power so by the time the OS loads this has long
+ * completed. However, when a card is hotplugged via e.g., USB4, we need to
+ * wait for this to complete. Once the C2PMSG is updated, we can
+ * continue.
+ */
- for (i = 0; i < 1000; i++) {
- msg = RREG32(mmMP0_SMN_C2PMSG_33);
- if (msg & 0x80000000)
- break;
- usleep_range(1000, 1100);
+ for (i = 0; i < 1000; i++) {
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+ usleep_range(1000, 1100);
+ }
}
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
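For reference, the wait that SR-IOV VFs now skip is a bounded register poll: up to 1000 reads of MP0_SMN_C2PMSG_33, sleeping about 1 ms between tries. A standalone sketch of the same poll-with-timeout shape, where read_status() is an illustrative stand-in for the register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static unsigned int read_status(void)
{
	return 0x80000000;	/* pretend IFWI init already finished */
}

/* Poll a ready bit with a hard upper bound instead of waiting forever. */
static bool wait_for_ifwi(void)
{
	for (int i = 0; i < 1000; i++) {
		if (read_status() & 0x80000000)
			return true;
		usleep(1000);
	}
	return false;		/* timed out; the caller decides what to do */
}

int main(void)
{
	printf("IFWI ready: %d\n", wait_for_ifwi());
	return 0;
}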
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 0b3b10d21952..8e81a83d37d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -41,8 +41,6 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
-#include <linux/pm_runtime.h>
-#include "amdgpu_trace.h"
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -58,42 +56,11 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int r;
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(1, __func__);
- if (r < 0)
- goto out;
-
return 0;
-
-out:
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
- return r;
-}
-
-/**
- * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
- *
- * @dmabuf: DMA-buf where we remove the attachment from
- * @attach: the attachment to remove
- *
- * Called when an attachment is removed from the DMA-buf.
- */
-static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct drm_gem_object *obj = dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
}
/**
@@ -266,7 +233,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
- .detach = amdgpu_dma_buf_detach,
.pin = amdgpu_dma_buf_pin,
.unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 10832b470448..2f24a6aa13bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -181,7 +181,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(1, __func__);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -309,7 +308,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- trace_amdgpu_runpm_reference_dumps(0, __func__);
} while (last_seq != seq);
return true;
@@ -980,7 +978,9 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_USER;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index c623e23049d1..256b95232de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -34,6 +34,7 @@
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
+#include "amdgpu_reset.h"
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
@@ -325,10 +326,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
- mb();
- amdgpu_device_flush_hdp(adev, NULL);
- for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
- amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+ amdgpu_gart_invalidate_tlb(adev);
drm_dev_exit(idx);
}
@@ -408,7 +406,10 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
return;
mb();
- amdgpu_device_flush_hdp(adev, NULL);
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_device_flush_hdp(adev, NULL);
+ up_read(&adev->reset_domain->sem);
+ }
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}
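amdgpu_gart_invalidate_tlb() now takes the reset-domain semaphore with a read trylock before flushing the HDP; since a GPU reset flushes all TLBs anyway (as the comments in the neighboring gmc hunks note), skipping the flush while a reset holds the lock exclusively is safe and avoids blocking against recovery. A runnable sketch of the guard using a pthread rwlock (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_domain_sem = PTHREAD_RWLOCK_INITIALIZER;

static void flush_hdp(void) { puts("HDP flushed"); }

static void invalidate_tlb(void)
{
	/* Flush only if no reset (the exclusive writer) is in flight. */
	if (pthread_rwlock_tryrdlock(&reset_domain_sem) == 0) {
		flush_hdp();
		pthread_rwlock_unlock(&reset_domain_sem);
	}
	puts("TLBs invalidated");
}

int main(void)
{
	invalidate_tlb();
	return 0;
}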
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 3b67809a59bc..1f22b4208729 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -334,6 +334,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
AMDGPU_GEM_CREATE_ENCRYPTED |
+ AMDGPU_GEM_CREATE_GFX12_DCC |
AMDGPU_GEM_CREATE_DISCARDABLE))
return -EINVAL;
@@ -341,6 +342,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
return -EINVAL;
+ if ((flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
+ ((amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) ||
+ !(args->in.domains & AMDGPU_GEM_DOMAIN_VRAM)))
+ return -EINVAL;
+
if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
return -EINVAL;
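The new check above gates the AMDGPU_GEM_CREATE_GFX12_DCC flag: the ioctl rejects it unless the GC IP is at least 12.0.0 and the requested domains include VRAM. The same validation reduced to a standalone sketch, with illustrative constants in place of the real UAPI values:

#include <stdio.h>

#define GEM_CREATE_GFX12_DCC	(1u << 0)
#define GEM_DOMAIN_VRAM		(1u << 1)

/* Reject DCC on pre-GFX12 hardware or non-VRAM placements up front. */
static int validate_dcc(unsigned int gc_major, unsigned int flags,
			unsigned int domains)
{
	if ((flags & GEM_CREATE_GFX12_DCC) &&
	    (gc_major < 12 || !(domains & GEM_DOMAIN_VRAM)))
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d\n", validate_dcc(12, GEM_CREATE_GFX12_DCC, GEM_DOMAIN_VRAM)); /* 0 */
	printf("%d\n", validate_dcc(11, GEM_CREATE_GFX12_DCC, GEM_DOMAIN_VRAM)); /* -1 */
	return 0;
}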
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 19b1817b55d7..82452606ae6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -506,9 +506,6 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
- struct amdgpu_hive_info *hive;
- struct amdgpu_ras *ras;
- int hive_ras_recovery = 0;
int i, r = 0;
int j;
@@ -533,16 +530,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
* This is workaround: only skip kiq_ring test
* during ras recovery in suspend stage for gfx9.4.3
*/
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive) {
- hive_ras_recovery = atomic_read(&hive->ras_recovery);
- amdgpu_put_xgmi_hive(hive);
- }
-
- ras = amdgpu_ras_get_context(adev);
if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) {
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
+ amdgpu_ras_in_recovery(adev)) {
spin_unlock(&kiq->ring_lock);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 603c0738fd03..322b8ff67cde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -620,10 +620,8 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int r;
if (!hub->sdma_invalidation_workaround || vmid ||
- !adev->mman.buffer_funcs_enabled ||
- !adev->ib_pool_ready || amdgpu_in_reset(adev) ||
+ !adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
!ring->sched.ready) {
-
/*
* A GPU reset should flush all TLBs anyway, so no need to do
* this while one is ongoing.
@@ -684,12 +682,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- signed long r;
+ int r;
uint32_t seq;
- if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
- !down_read_trylock(&adev->reset_domain->sem)) {
+ /*
+ * A GPU reset should flush all TLBs anyway, so no need to do
+ * this while one is ongoing.
+ */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return 0;
+ if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,
@@ -703,43 +706,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
flush_type, all_hub,
inst);
- return 0;
- }
+ r = 0;
+ } else {
+ /* 2 dwords flush + 8 dwords fence */
+ ndw = kiq->pmf->invalidate_tlbs_size + 8;
- /* 2 dwords flush + 8 dwords fence */
- ndw = kiq->pmf->invalidate_tlbs_size + 8;
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ if (adev->gmc.flush_tlb_needs_extra_type_0)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_0)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
+ amdgpu_ring_alloc(ring, ndw);
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
- spin_lock(&adev->gfx.kiq[inst].ring_lock);
- amdgpu_ring_alloc(ring, ndw);
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+ if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
- if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ goto error_unlock_reset;
+ }
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r) {
- amdgpu_ring_undo(ring);
+ amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- goto error_unlock_reset;
- }
-
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
- if (r < 1) {
- dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
- r = -ETIME;
- goto error_unlock_reset;
+ if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+ dev_err(adev->dev, "timeout waiting for kiq fence\n");
+ r = -ETIME;
+ }
}
- r = 0;
error_unlock_reset:
up_read(&adev->reset_domain->sem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..0760e70402ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -200,8 +200,6 @@ void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
amdgpu_ttm_recover_gart(node->base.bo);
}
spin_unlock(&mgr->lock);
-
- amdgpu_gart_invalidate_tlb(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3d7fcdeaf8cf..b5b9d4f40f53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -290,18 +290,36 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_signaled((*id)->last_flush))) {
struct dma_fence *tmp;
- /* Don't use per engine and per process VMID at the same time */
- if (adev->vm_manager.concurrent_flush)
- ring = NULL;
-
- /* to prevent one context starved by another context */
- (*id)->pd_gpu_addr = 0;
- tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
- if (tmp) {
+ /* Wait for the gang to be assembled before using a
+ * reserved VMID or otherwise the gang could deadlock.
+ */
+ tmp = amdgpu_device_get_gang(adev);
+ if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
*id = NULL;
- *fence = dma_fence_get(tmp);
+ *fence = tmp;
return 0;
}
+ dma_fence_put(tmp);
+
+ /* Make sure the id is owned by the gang before proceeding */
+ if (!job->gang_submit ||
+ (*id)->owner != vm->immediate.fence_context) {
+
+ /* Don't use per engine and per process VMID at the
+ * same time
+ */
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
+ /* to prevent one context starved by another context */
+ (*id)->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+ if (tmp) {
+ *id = NULL;
+ *fence = dma_fence_get(tmp);
+ return 0;
+ }
+ }
needs_flush = true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index e4742b65032d..cf0c4470ab9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -77,6 +77,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_JOB;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index da40c2d97df8..9d3a3c778504 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -193,27 +193,26 @@ static int amdgpu_mca_bank_set_merge(struct mca_bank_set *mca_set, struct mca_ba
return 0;
}
-static int amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node)
+static void amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node)
{
if (!node)
- return -EINVAL;
+ return;
list_del(&node->node);
kvfree(node);
mca_set->nr_entries--;
-
- return 0;
}
static void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
{
struct mca_bank_node *node, *tmp;
- list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
- list_del(&node->node);
- kvfree(node);
- }
+ if (list_empty(&mca_set->list))
+ return;
+
+ list_for_each_entry_safe(node, tmp, &mca_set->list, node)
+ amdgpu_mca_bank_set_remove_node(mca_set, node);
}
void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
@@ -233,7 +232,7 @@ int amdgpu_mca_init(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
mca_cache = &mca->mca_caches[i];
- spin_lock_init(&mca_cache->lock);
+ mutex_init(&mca_cache->lock);
amdgpu_mca_bank_set_init(&mca_cache->mca_set);
}
@@ -251,6 +250,7 @@ void amdgpu_mca_fini(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
mca_cache = &mca->mca_caches[i];
amdgpu_mca_bank_set_release(&mca_cache->mca_set);
+ mutex_destroy(&mca_cache->lock);
}
}
@@ -455,9 +455,9 @@ static int amdgpu_mca_add_mca_set_to_cache(struct amdgpu_device *adev, enum amdg
struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type];
int ret;
- spin_lock(&mca_cache->lock);
+ mutex_lock(&mca_cache->lock);
ret = amdgpu_mca_bank_set_merge(&mca_cache->mca_set, new);
- spin_unlock(&mca_cache->lock);
+ mutex_unlock(&mca_cache->lock);
return ret;
}
@@ -487,10 +487,10 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
}
/* dispatch mca set again if mca cache has valid data */
- spin_lock(&mca_cache->lock);
+ mutex_lock(&mca_cache->lock);
if (mca_cache->mca_set.nr_entries)
ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_cache->mca_set, err_data);
- spin_unlock(&mca_cache->lock);
+ mutex_unlock(&mca_cache->lock);
out_mca_release:
amdgpu_mca_bank_set_release(&mca_set);
@@ -608,9 +608,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_se
void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
- if (!root ||
- (amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6) &&
- amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 14)))
+ if (!root)
return;
debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index c3c184c88dad..e80323ff90c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -84,7 +84,7 @@ struct mca_bank_set {
struct mca_bank_cache {
struct mca_bank_set mca_set;
- spinlock_t lock;
+ struct mutex lock;
};
struct amdgpu_mca {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 62edf6328566..e499d6ba306b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -32,18 +32,6 @@
#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8
-signed long amdgpu_mes_fence_wait_polling(u64 *fence,
- u64 wait_seq,
- signed long timeout)
-{
-
- while ((s64)(wait_seq - *fence) > 0 && timeout > 0) {
- udelay(2);
- timeout -= 2;
- }
- return timeout > 0 ? timeout : 0;
-}
-
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
return roundup(AMDGPU_ONE_DOORBELL_SIZE *
@@ -1528,11 +1516,9 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
- ucode_prefix);
- DRM_INFO("try to fall back to %s\n", fw_name);
+ dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
- fw_name);
+ "amdgpu/%s_mes.bin", ucode_prefix);
}
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index df9f0404d842..e11051271f71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -352,10 +352,6 @@ struct amdgpu_mes_funcs {
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
-signed long amdgpu_mes_fence_wait_polling(u64 *fence,
- u64 wait_seq,
- signed long timeout);
-
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 5a648a657dc6..93025522fbfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -3181,12 +3181,10 @@ int psp_ring_cmd_submit(struct psp_context *psp,
int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
if (err)
goto out;
@@ -3205,12 +3203,10 @@ out:
int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
if (err)
goto out;
@@ -3362,7 +3358,6 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *sos_hdr;
const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
@@ -3372,8 +3367,7 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int fw_index = 0;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3598,11 +3592,9 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
{
const struct common_firmware_header *hdr;
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
int err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -3628,7 +3620,6 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
struct amdgpu_firmware_info *info = NULL;
int err = 0;
@@ -3638,8 +3629,7 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
return -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
if (err) {
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
@@ -3713,7 +3703,6 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret, idx;
- char fw_name[100];
const struct firmware *usbc_pd_fw;
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
@@ -3727,8 +3716,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
if (!drm_dev_enter(ddev, &idx))
return -ENODEV;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
- ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
+ ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
if (ret)
goto fail;
@@ -3750,7 +3738,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
rel_buf:
- release_firmware(usbc_pd_fw);
+ amdgpu_ucode_release(&usbc_pd_fw);
fail:
if (ret) {
dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
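The psp.c hunks above are part of the firmware-loading cleanup: amdgpu_ucode_request() now accepts a printf-style name, so each caller's local fw_name buffer and snprintf() disappear. A userspace sketch of the varargs plumbing such a wrapper needs; ucode_request() here is a hypothetical stand-in, not the kernel function:

#include <stdarg.h>
#include <stdio.h>

/* Format the firmware name inside the helper so callers can pass
 * "amdgpu/%s_asd.bin", chip_name directly. */
static int ucode_request(const char *fmt, ...)
{
	char fw_name[64];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(fw_name, sizeof(fw_name), fmt, ap);
	va_end(ap);
	printf("requesting %s\n", fw_name);	/* stands in for the fetch */
	return 0;
}

int main(void)
{
	return ucode_request("amdgpu/%s_asd.bin", "psp_14_0");
}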
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index b3d11703df04..68e9935028db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1409,11 +1409,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
enum amdgpu_ras_block block)
{
struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
- struct amdgpu_hive_info *hive;
- int hive_ras_recovery = 0;
if (!block_obj || !block_obj->hw_ops) {
dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
@@ -1425,15 +1422,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
!amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive) {
- hive_ras_recovery = atomic_read(&hive->ras_recovery);
- amdgpu_put_xgmi_hive(hive);
- }
-
/* skip ras error reset in gpu reset */
- if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
- hive_ras_recovery) &&
+ if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
((smu_funcs && smu_funcs->set_debug_mode) ||
(mca_funcs && mca_funcs->mca_set_debug_mode)))
return -EOPNOTSUPP;
@@ -1911,6 +1901,23 @@ static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
obj, &amdgpu_ras_debugfs_ops);
}
+static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
+{
+ bool ret;
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
+
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -1937,10 +1944,12 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
}
}
- if (amdgpu_aca_is_enabled(adev))
- amdgpu_aca_smu_debugfs_init(adev, dir);
- else
- amdgpu_mca_smu_debugfs_init(adev, dir);
+ if (amdgpu_ras_aca_is_supported(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_aca_smu_debugfs_init(adev, dir);
+ else
+ amdgpu_mca_smu_debugfs_init(adev, dir);
+ }
}
/* debugfs end */
@@ -2049,8 +2058,9 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
struct amdgpu_device *adev = obj->adev;
struct amdgpu_ras_block_object *block_obj =
amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!block_obj)
+ if (!block_obj || !con)
return;
/* both query_poison_status and handle_poison_consumption are optional,
@@ -2073,14 +2083,17 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
- /* gpu reset is fallback for failed and default cases */
- if (poison_stat) {
+ /* gpu reset is fallback for failed and default cases.
+ * For RMA case, amdgpu_umc_poison_handler will handle gpu reset.
+ */
+ if (poison_stat && !con->is_rma) {
dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
block_obj->ras_comm.name);
amdgpu_ras_reset_gpu(adev);
- } else {
- amdgpu_gfx_poison_consumption_handler(adev, entry);
}
+
+ if (!poison_stat)
+ amdgpu_gfx_poison_consumption_handler(adev, entry);
}
static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
@@ -2118,6 +2131,7 @@ static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
/* Let IP handle its data, maybe we need to get the output
* from the callback to update the error type/count, etc
*/
+ amdgpu_ras_set_fed(obj->adev, true);
ret = data->cb(obj->adev, &err_data, entry);
/* ue will trigger an interrupt, and in that case
* we need do a reset to recovery the whole system.
@@ -2438,6 +2452,23 @@ static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
}
}
+bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int hive_ras_recovery = 0;
+
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+
+ if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+ return true;
+
+ return false;
+}
+
static void amdgpu_ras_do_recovery(struct work_struct *work)
{
struct amdgpu_ras *ras =
@@ -2487,6 +2518,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_RAS;
/* Perform full reset in fatal error mode */
if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
@@ -2795,16 +2827,21 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
+ unsigned long err_cnt;
- if (amdgpu_in_reset(adev) || atomic_read(&con->in_recovery))
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev))
return;
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
+ err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
+ if (err_cnt && con->is_rma)
+ amdgpu_ras_reset_gpu(adev);
+
mutex_lock(&con->umc_ecc_log.lock);
if (radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
UMC_ECC_NEW_DETECTED_TAG))
@@ -2861,7 +2898,8 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
if (poison_msg->pasid_fn)
poison_msg->pasid_fn(adev, pasid, poison_msg->data);
- if (reset) {
+ /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
+ if (reset && !con->is_rma) {
flush_delayed_work(&con->page_retirement_dwork);
con->gpu_reset_flags |= reset;
@@ -3427,6 +3465,15 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ if (amdgpu_ras_aca_is_supported(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ r = amdgpu_aca_init(adev);
+ else
+ r = amdgpu_mca_init(adev);
+ if (r)
+ goto release_con;
+ }
+
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
adev->ras_hw_enabled, adev->ras_enabled);
@@ -3635,25 +3682,22 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
amdgpu_ras_event_mgr_init(adev);
- if (amdgpu_aca_is_enabled(adev)) {
- if (!amdgpu_in_reset(adev)) {
- r = amdgpu_aca_init(adev);
+ if (amdgpu_ras_aca_is_supported(adev)) {
+ if (amdgpu_in_reset(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ r = amdgpu_aca_reset(adev);
+ else
+ r = amdgpu_mca_reset(adev);
if (r)
return r;
}
- if (!amdgpu_sriov_vf(adev))
- amdgpu_ras_set_aca_debug_mode(adev, false);
- } else {
- if (amdgpu_in_reset(adev))
- r = amdgpu_mca_reset(adev);
- else
- r = amdgpu_mca_init(adev);
- if (r)
- return r;
-
- if (!amdgpu_sriov_vf(adev))
- amdgpu_ras_set_mca_debug_mode(adev, false);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_ras_set_aca_debug_mode(adev, false);
+ else
+ amdgpu_ras_set_mca_debug_mode(adev, false);
+ }
}
/* Guest side doesn't need init ras feature */
@@ -3727,10 +3771,12 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
amdgpu_ras_fs_fini(adev);
amdgpu_ras_interrupt_remove_all(adev);
- if (amdgpu_aca_is_enabled(adev))
- amdgpu_aca_fini(adev);
- else
- amdgpu_mca_fini(adev);
+ if (amdgpu_ras_aca_is_supported(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_aca_fini(adev);
+ else
+ amdgpu_mca_fini(adev);
+ }
WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
@@ -3982,6 +4028,12 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ /* mode1 reset is the only option for RMA status */
+ if (ras->is_rma) {
+ ras->gpu_reset_flags = 0;
+ ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ }
+
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
return 0;
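
The amdgpu_ras_in_recovery() helper introduced above folds the open-coded
XGMI hive check (previously duplicated in amdgpu_ras_reset_error_count() and
the page-retirement worker) into a single predicate. A minimal caller sketch,
using only symbols visible in this hunk:

    /* Skip RAS housekeeping while a recovery is in flight, either on
     * this device or anywhere in its XGMI hive.
     */
    if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev))
        return;
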
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index e70c45712ddb..83437fef9df5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -954,6 +954,8 @@ int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset);
+bool amdgpu_ras_in_recovery(struct amdgpu_device *adev);
+
__printf(3, 4)
void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
const char *fmt, ...);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 9deb41d61e8d..66c1a868c0e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -164,16 +164,14 @@ void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
size_t len)
{
- struct amdgpu_ring *ring;
-
if (!buf || !len)
return;
switch (rst_ctxt->src) {
case AMDGPU_RESET_SRC_JOB:
if (rst_ctxt->job) {
- ring = amdgpu_job_ring(rst_ctxt->job);
- snprintf(buf, len, "job hang on ring:%s", ring->name);
+ snprintf(buf, len, "job hang on ring:%s",
+ rst_ctxt->job->base.sched->name);
} else {
strscpy(buf, "job hang", len);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 151f83ea803b..183a976ba29d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -215,14 +215,14 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
const struct sdma_firmware_header_v3_0 *sdma_hv3;
uint16_t version_major;
char ucode_prefix[30];
- char fw_name[52];
amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
if (instance == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ "amdgpu/%s.bin", ucode_prefix);
else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s%d.bin", ucode_prefix, instance);
- err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ "amdgpu/%s%d.bin", ucode_prefix, instance);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8de2d05d0735..58906bf7448e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -295,8 +295,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_res_cursor src_mm, dst_mm;
struct dma_fence *fence = NULL;
int r = 0;
-
uint32_t copy_flags = 0;
+ struct amdgpu_bo *abo_src, *abo_dst;
if (!adev->mman.buffer_funcs_enabled) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
@@ -325,8 +325,14 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
if (r)
goto error;
+ abo_src = ttm_to_amdgpu_bo(src->bo);
+ abo_dst = ttm_to_amdgpu_bo(dst->bo);
if (tmz)
copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
+ if (abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
+ if (abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
&next, false, true, copy_flags);
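
The two flags derived above encode per-side DCC handling for GFX12 buffer
moves: a DCC source must be read through decompression, and a DCC destination
must be written back compressed. A condensed sketch of the mapping, reusing
the hunk's own names:

    /* Per-BO GFX12 DCC state -> SDMA copy behaviour */
    if (abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
        copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
    if (abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
        copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
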
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index b6f53129dea3..f2eb1cf364c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -110,6 +110,8 @@ struct amdgpu_copy_mem {
};
#define AMDGPU_COPY_FLAGS_TMZ (1 << 0)
+#define AMDGPU_COPY_FLAGS_READ_DECOMPRESSED (1 << 1)
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESSED (1 << 2)
int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
@@ -146,7 +148,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
bool enable);
-
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 75ece8a2f96b..3588f1c5a007 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -28,6 +28,8 @@
#include "amdgpu.h"
#include "amdgpu_ucode.h"
+#define AMDGPU_UCODE_NAME_MAX (128)
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -1427,28 +1429,40 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
*
* @adev: amdgpu device
* @fw: pointer to load firmware to
- * @fw_name: firmware to load
+ * @fmt: firmware name format string
+ * @...: variable arguments
*
* This is a helper that will use request_firmware and amdgpu_ucode_validate
* to load and run basic validation on firmware. If the load fails, remap
* the error code to -ENODEV, so that early_init functions will fail to load.
*/
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fw_name)
+ const char *fmt, ...)
{
- int err = request_firmware(fw, fw_name, adev->dev);
+ char fname[AMDGPU_UCODE_NAME_MAX];
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vsnprintf(fname, sizeof(fname), fmt, ap);
+ va_end(ap);
+ if (r >= sizeof(fname)) {
+ dev_warn(adev->dev, "amdgpu firmware name buffer overflow\n");
+ return -EOVERFLOW;
+ }
- if (err)
+ r = request_firmware(fw, fname, adev->dev);
+ if (r)
return -ENODEV;
- err = amdgpu_ucode_validate(*fw);
- if (err) {
- dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
+ r = amdgpu_ucode_validate(*fw);
+ if (r) {
+ dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
release_firmware(*fw);
*fw = NULL;
}
- return err;
+ return r;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index a3c04f711099..f4e5285c4dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -593,8 +593,9 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
+__printf(3, 4)
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fw_name);
+ const char *fmt, ...);
void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
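
With the printf-style signature and the __printf(3, 4) annotation above,
callers build the firmware name in a single call and the compiler checks the
format arguments against the format string. A representative conversion from
elsewhere in this series (the vpe hunk):

    err = amdgpu_ucode_request(adev, &adev->vpe.fw,
                               "amdgpu/%s.bin", fw_prefix);
    if (err)
        goto out;
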
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 540e0f066b26..20e0e522fb51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -195,7 +195,8 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
amdgpu_umc_handle_bad_pages(adev, ras_error_status);
- if (err_data->ue_count && reset) {
+ if ((err_data->ue_count || err_data->de_count) &&
+ (reset || (con && con->is_rma))) {
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
@@ -211,6 +212,7 @@ int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
.block = AMDGPU_RAS_BLOCK__UMC,
};
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
uint32_t timeout = timeout_ms;
memset(&err_data, 0, sizeof(err_data));
@@ -243,9 +245,7 @@ int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (reset) {
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-
+ if (reset || (err_data.err_addr_cnt && con && con->is_rma)) {
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 5e2b7c340724..8d65b096db90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -94,18 +94,14 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_early_init(struct amdgpu_device *adev)
{
char ucode_prefix[25];
- char fw_name[40];
int r, i;
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) &&
- i == 1) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
- }
-
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
+ if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s_%d.bin", ucode_prefix, i);
+ else
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s.bin", ucode_prefix);
if (r) {
amdgpu_ucode_release(&adev->vcn.fw[i]);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index b56d243c53ab..63f2286858c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -153,6 +153,20 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
}
/**
+ * amdgpu_virt_ready_to_reset() - send ready to reset to host
+ * @adev: amdgpu device.
+ *
+ * Send a ready-to-reset message to the GPU hypervisor to signal that we
+ * have stopped GPU activity and are ready for a host FLR.
+ */
+void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->ready_to_reset)
+ virt->ops->ready_to_reset(adev);
+}
+
+/**
* amdgpu_virt_wait_reset() - wait for reset gpu completed
* @adev: amdgpu device.
* Wait for GPU reset completed.
@@ -979,6 +993,9 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
return 0;
}
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
@@ -1055,6 +1072,9 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
{
u32 rlcg_flag;
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
@@ -1072,6 +1092,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
{
u32 rlcg_flag;
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
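
The new ready_to_reset callback lets a guest signal the hypervisor that it
has quiesced before the host proceeds with FLR. A hypothetical backend wiring
sketch (the xgpu_foo_* names are illustrative placeholders, not identifiers
from this series):

    static const struct amdgpu_virt_ops xgpu_foo_virt_ops = {
        .reset_gpu      = xgpu_foo_request_reset,
        .ready_to_reset = xgpu_foo_ready_to_reset,
        .wait_reset     = xgpu_foo_wait_reset,
    };
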
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 0ec246c74570..f04cd1586c72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -88,6 +88,7 @@ struct amdgpu_virt_ops {
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
int (*req_init_data)(struct amdgpu_device *adev);
int (*reset_gpu)(struct amdgpu_device *adev);
+ void (*ready_to_reset)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
u32 data1, u32 data2, u32 data3);
@@ -347,6 +348,7 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
+void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 8e3501e3765b..046949c4b695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -128,6 +128,7 @@ struct amdgpu_mem_stats;
(((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_GFX12_MASK)) | \
AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype))
+#define AMDGPU_PTE_DCC (1ULL << 58)
#define AMDGPU_PTE_IS_PTE (1ULL << 63)
/* PDE Block Fragment Size for gfx v12 */
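
AMDGPU_PTE_DCC marks a GFX12 page-table entry as DCC-compressed; the
gmc_v12_0.c hunk later in this diff derives it from the BO creation flag:

    if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
        *flags |= AMDGPU_PTE_DCC;
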
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 49881073ff58..bad232859972 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -232,13 +232,11 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
{
struct amdgpu_device *adev = vpe->ring.adev;
const struct vpe_firmware_header_v1_0 *vpe_hdr;
- char fw_prefix[32], fw_name[64];
+ char fw_prefix[32];
int ret;
amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", fw_prefix);
-
- ret = amdgpu_ucode_request(adev, &adev->vpe.fw, fw_name);
+ ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index d552e013354c..09715b506468 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -301,7 +301,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr) += 4;
if (print)
DEBUG("IMM 0x%08X\n", val);
- return val;
+ break;
case ATOM_SRC_WORD0:
case ATOM_SRC_WORD8:
case ATOM_SRC_WORD16:
@@ -309,7 +309,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr) += 2;
if (print)
DEBUG("IMM 0x%04X\n", val);
- return val;
+ break;
case ATOM_SRC_BYTE0:
case ATOM_SRC_BYTE8:
case ATOM_SRC_BYTE16:
@@ -318,9 +318,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr)++;
if (print)
DEBUG("IMM 0x%02X\n", val);
- return val;
+ break;
}
- break;
+ return val;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 6948ebda0fa2..952737de9411 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -107,7 +107,6 @@ static void cik_sdma_free_microcode(struct amdgpu_device *adev)
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err = 0, i;
DRM_DEBUG("\n");
@@ -133,16 +132,18 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ "amdgpu/%s_sdma.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ "amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
}
out:
if (err) {
- pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
+ pr_err("cik_sdma: Failed to load firmware \"%s_sdma%s.bin\"\n",
+ chip_name, i == 0 ? "" : "1");
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 3b0d8d3af58a..18488c02d1cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4116,7 +4116,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
- char fw_name[53];
char ucode_prefix[30];
const char *wks = "";
int err;
@@ -4131,27 +4130,27 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
wks = "_wks";
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", ucode_prefix, wks);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", ucode_prefix, wks);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", ucode_prefix, wks);
- err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ "amdgpu/%s_ce%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
if (!amdgpu_sriov_vf(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
@@ -4166,15 +4165,15 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", ucode_prefix, wks);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", ucode_prefix, wks);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ "amdgpu/%s_mec2%s.bin", ucode_prefix, wks);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 19d95455780f..2a510351dfce 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4415,7 +4415,9 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
false : true;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
- amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
+ /* TODO: investigate why this and the hdp flush above are needed,
+ * are we missing a flush somewhere else?
+ */
+ adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
index 9e7ce1e6bc06..9cd221ed240c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -85,6 +85,7 @@ static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) &&
(entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) &&
!entry->vmid && !entry->pasid) {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
uint32_t rlc_status0 = 0;
rlc_status0 = RREG32_SOC15(GC, 0, regRLC_RLCS_FED_STATUS_0);
@@ -96,7 +97,8 @@ static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
- amdgpu_ras_reset_gpu(adev);
+ if (con && !con->is_rma)
+ amdgpu_ras_reset_gpu(adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 6419f98e32b6..460bf33a22b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3192,7 +3192,9 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
false : true;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
- amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
+ /* TODO: investigate why this and the hdp flush above are needed,
+ * are we missing a flush somewhere else?
+ */
+ adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d0992ce9fb47..564f0b9336b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -311,7 +311,6 @@ static const u32 verde_rlc_save_restore_register_list[] =
static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err;
const struct gfx_firmware_header_v1_0 *cp_hdr;
const struct rlc_firmware_header_v1_0 *rlc_hdr;
@@ -337,32 +336,32 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ "amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
@@ -371,7 +370,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
- pr_err("gfx6: Failed to load firmware \"%s\"\n", fw_name);
+ pr_err("gfx6: Failed to load firmware %s gfx firmware\n", chip_name);
amdgpu_ucode_release(&adev->gfx.pfp_fw);
amdgpu_ucode_release(&adev->gfx.me_fw);
amdgpu_ucode_release(&adev->gfx.ce_fw);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8f19b6ae8d5b..d84589137df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -909,7 +909,6 @@ static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err;
DRM_DEBUG("\n");
@@ -934,38 +933,38 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ "amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ "amdgpu/%s_mec2.bin", chip_name);
if (err)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc.bin", chip_name);
out:
if (err) {
- pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
+ pr_err("gfx7: Failed to load firmware %s gfx firmware\n", chip_name);
gfx_v7_0_free_microcode(adev);
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 2f0e72caee1a..b4658c7db0e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -939,7 +939,6 @@ static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
@@ -982,15 +981,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
}
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp_2.bin", chip_name);
if (err == -ENODEV) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp.bin", chip_name);
}
} else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp.bin", chip_name);
}
if (err)
goto out;
@@ -999,15 +998,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me_2.bin", chip_name);
if (err == -ENODEV) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me.bin", chip_name);
}
} else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me.bin", chip_name);
}
if (err)
goto out;
@@ -1017,15 +1016,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ "amdgpu/%s_ce_2.bin", chip_name);
if (err == -ENODEV) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ "amdgpu/%s_ce.bin", chip_name);
}
} else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ "amdgpu/%s_ce.bin", chip_name);
}
if (err)
goto out;
@@ -1044,8 +1043,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
} else
adev->virt.chained_ib_support = false;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -1093,15 +1092,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec_2.bin", chip_name);
if (err == -ENODEV) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec.bin", chip_name);
}
} else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec.bin", chip_name);
}
if (err)
goto out;
@@ -1112,15 +1111,15 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ)) {
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ "amdgpu/%s_mec2_2.bin", chip_name);
if (err == -ENODEV) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ "amdgpu/%s_mec2.bin", chip_name);
}
} else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ "amdgpu/%s_mec2.bin", chip_name);
}
if (!err) {
cp_hdr = (const struct gfx_firmware_header_v1_0 *)
@@ -1194,9 +1193,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
- dev_err(adev->dev,
- "gfx8: Failed to load firmware \"%s\"\n",
- fw_name);
+ dev_err(adev->dev, "gfx8: Failed to load firmware %s gfx firmware\n", chip_name);
amdgpu_ucode_release(&adev->gfx.pfp_fw);
amdgpu_ucode_release(&adev->gfx.me_fw);
amdgpu_ucode_release(&adev->gfx.ce_fw);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ad99ee10fd4a..2929c8972ea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1378,23 +1378,22 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[50];
int err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ "amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ "amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ "amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
@@ -1411,7 +1410,6 @@ out:
static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[53];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -1429,20 +1427,22 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
if (!strcmp(chip_name, "picasso") &&
(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc_am4.bin", chip_name);
else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
(smu_version >= 0x41e2b))
/**
* SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
*/
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_kicker_rlc.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
- rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
@@ -1466,28 +1466,27 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
char *chip_name)
{
- char fw_name[50];
int err;
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_sjt_mec.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
-
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
+
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec2.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ "amdgpu/%s_sjt_mec2.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
-
- /* ignore failures to load */
- err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ "amdgpu/%s_mec2.bin", chip_name);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index aecc2bcea145..8d8763ebe027 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -372,15 +372,14 @@ static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
- char fw_name[30];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
uint16_t version_minor;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ "amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -409,12 +408,10 @@ static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
- char fw_name[30];
int err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
-
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ "amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
@@ -4203,9 +4200,10 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
{
- int i, j, k, counter, xcc_id, active_cu_number = 0;
- u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
unsigned disable_masks[4 * 4];
+ bool is_symmetric_cus;
if (!adev || !cu_info)
return -EINVAL;
@@ -4223,6 +4221,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
+ is_symmetric_cus = true;
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
@@ -4250,6 +4249,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
+ if (i && is_symmetric_cus && prev_counter != counter)
+ is_symmetric_cus = false;
+ prev_counter = counter;
+ }
+ if (is_symmetric_cus) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
}
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
xcc_id);
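
The loop above decides, per XCC, whether every shader engine reports the same
active-CU count; only when the counts are symmetric are the CPC harvesting
workarounds disabled via CP_CPC_DEBUG. The essence of the detection, as a
standalone sketch (cu_count and num_se are illustrative names, not from this
series):

    bool is_symmetric_cus = true;
    for (i = 1; i < num_se; i++)
        if (cu_count[i] != cu_count[i - 1])
            is_symmetric_cus = false;
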
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index 16d3443f8a82..be78507ec0d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -532,6 +532,9 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
(bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
+ if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ *flags |= AMDGPU_PTE_DCC;
+
/* WA for HW bug */
if (is_system || ((bo_adev != adev) && coherent))
*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 3e38d8bfcb69..d36725666b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -98,9 +98,7 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err;
- bool is_58_fw = false;
DRM_DEBUG("\n");
@@ -126,17 +124,13 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
/* this memory configuration requires special firmware */
if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
- is_58_fw = true;
+ chip_name = "si58";
- if (is_58_fw)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
if (err) {
dev_err(adev->dev,
- "si_mc: Failed to load firmware \"%s\"\n",
- fw_name);
+ "si_mc: Failed to load firmware \"%s_mc.bin\"\n",
+ chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 85df8fc81065..994432fb57ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -130,7 +130,6 @@ static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err;
DRM_DEBUG("\n");
@@ -153,11 +152,9 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
return -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
-
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
if (err) {
- pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
+ pr_err("cik_mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index fc97757e33d9..86488c052f82 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -212,7 +212,6 @@ static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err;
DRM_DEBUG("\n");
@@ -255,10 +254,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
return -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
if (err) {
- pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
+ pr_err("mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 3e91a8e42c21..a9f5d9e4610d 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -41,7 +41,6 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_1_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
- char fw_name[45];
char ucode_prefix[30];
int err;
const struct imu_firmware_header_v1_0 *imu_hdr;
@@ -50,11 +49,10 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_imu.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
+
imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
//adev->gfx.imu_feature_version = le32_to_cpu(imu_hdr->ucode_feature_version);
@@ -75,8 +73,8 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
dev_err(adev->dev,
- "gfx11: Failed to load firmware \"%s\"\n",
- fw_name);
+ "gfx11: Failed to load firmware \"%s_imu.bin\"\n",
+ ucode_prefix);
amdgpu_ucode_release(&adev->gfx.imu_fw);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index 0c8ef908d112..1341f0292031 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -39,7 +39,6 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin");
static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char fw_name[40];
char ucode_prefix[15];
int err;
const struct imu_firmware_header_v1_0 *imu_hdr;
@@ -48,11 +47,10 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_imu.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
+
imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
adev->gfx.imu_fw_version = le32_to_cpu(imu_hdr->header.ucode_version);
@@ -72,8 +70,8 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
dev_err(adev->dev,
- "gfx12: Failed to load firmware \"%s\"\n",
- fw_name);
+ "gfx12: Failed to load firmware \"%s_imu.bin\"\n",
+ ucode_prefix);
amdgpu_ucode_release(&adev->gfx.imu_fw);
}
@@ -119,7 +117,8 @@ static int imu_v12_0_load_microcode(struct amdgpu_device *adev)
static int imu_v12_0_wait_for_reset_status(struct amdgpu_device *adev)
{
- int i, imu_reg_val = 0;
+ u32 imu_reg_val = 0;
+ int i;
for (i = 0; i < adev->usec_timeout; i++) {
imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_GFX_RESET_CTRL);
@@ -138,7 +137,7 @@ static int imu_v12_0_wait_for_reset_status(struct amdgpu_device *adev)
static void imu_v12_0_setup(struct amdgpu_device *adev)
{
- int imu_reg_val;
+ u32 imu_reg_val;
WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_ACCESS_CTRL0, 0xffffff);
WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_ACCESS_CTRL1, 0xffff);
@@ -157,7 +156,7 @@ static void imu_v12_0_setup(struct amdgpu_device *adev)
static int imu_v12_0_start(struct amdgpu_device *adev)
{
- int imu_reg_val;
+ u32 imu_reg_val;
imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_CORE_CTRL);
imu_reg_val &= 0xfffffffe;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 77595e9622da..71f43a5c7f72 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -581,7 +581,6 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v1_0_decode_ring_vm_funcs;
- DRM_INFO("JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v1_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index ef3e42f6b841..99adf3625657 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -131,16 +131,11 @@ static int jpeg_v2_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
- int r;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
- r = amdgpu_ring_test_helper(ring);
- if (!r)
- DRM_INFO("JPEG decode initialized successfully.\n");
-
- return r;
+ return amdgpu_ring_test_helper(ring);
}
/**
@@ -795,7 +790,6 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v2_0_dec_ring_vm_funcs;
- DRM_INFO("JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index afeaf3c64e27..d8ef95c847c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -196,8 +196,6 @@ static int jpeg_v2_5_hw_init(void *handle)
return r;
}
- DRM_INFO("JPEG decode initialized successfully.\n");
-
return 0;
}
@@ -728,7 +726,6 @@ static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
else /* CHIP_ALDEBARAN */
adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v2_6_dec_ring_vm_funcs;
adev->jpeg.inst[i].ring_dec->me = i;
- DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 1c7cf4800bf7..31cfa3ce6528 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -146,18 +146,11 @@ static int jpeg_v3_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
- int r;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
-
- DRM_INFO("JPEG decode initialized successfully.\n");
-
- return 0;
+ return amdgpu_ring_test_helper(ring);
}
/**
@@ -593,7 +586,6 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v3_0_dec_ring_vm_funcs;
- DRM_INFO("JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 237fe5df5a8f..3dac8f259d7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -181,8 +181,6 @@ static int jpeg_v4_0_hw_init(void *handle)
return r;
}
- DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
-
return 0;
}
@@ -755,7 +753,6 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_dec_ring_vm_funcs;
- DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index d66af11aa66c..04d8966423de 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -341,7 +341,6 @@ static int jpeg_v4_0_3_hw_init(void *handle)
}
}
}
- DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
return 0;
}
@@ -1100,7 +1099,6 @@ static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev)
adev->jpeg.inst[i].aid_id =
jpeg_inst / adev->jpeg.num_inst_per_aid;
}
- DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 4c8f9772437b..f96ac6bce526 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -191,7 +191,6 @@ static int jpeg_v4_0_5_hw_init(void *handle)
// TODO: Enable ring test with DPG support
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
- DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully under DPG Mode");
return 0;
}
@@ -205,9 +204,6 @@ static int jpeg_v4_0_5_hw_init(void *handle)
return r;
}
- if (!r)
- DRM_INFO("JPEG decode initialized successfully under SPG Mode\n");
-
return 0;
}
@@ -805,7 +801,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
adev->jpeg.inst[i].ring_dec->me = i;
- DRM_DEV_INFO(adev->dev, "JPEG%d decode is enabled in VM mode\n", i);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 8263b97c4466..3b1f6ad99100 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -154,18 +154,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
{
- int ndw = size / 4;
- signed long r;
- union MESAPI__MISC *x_pkt = pkt;
- struct MES_API_STATUS *api_status;
+ union MESAPI__QUERY_MES_STATUS mes_status_pkt;
+ signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
- unsigned long flags;
- signed long timeout = 3000000; /* 3000 ms */
+ struct MES_API_STATUS *api_status;
+ union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
- u32 fence_offset;
- u64 fence_gpu_addr;
- u64 *fence_ptr;
+ unsigned long flags;
+ u64 status_gpu_addr;
+ u32 status_offset;
+ u64 *status_ptr;
+ signed long r;
int ret;
if (x_pkt->header.opcode >= MES_SCH_API_MAX)
@@ -177,28 +177,38 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
/* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */
timeout = 15 * 600 * 1000;
}
- BUG_ON(size % 4 != 0);
- ret = amdgpu_device_wb_get(adev, &fence_offset);
+ ret = amdgpu_device_wb_get(adev, &status_offset);
if (ret)
return ret;
- fence_gpu_addr =
- adev->wb.gpu_addr + (fence_offset * 4);
- fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
- *fence_ptr = 0;
+
+ status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
+ status_ptr = (u64 *)&adev->wb.wb[status_offset];
+ *status_ptr = 0;
spin_lock_irqsave(&mes->ring_lock, flags);
- if (amdgpu_ring_alloc(ring, ndw)) {
- spin_unlock_irqrestore(&mes->ring_lock, flags);
- amdgpu_device_wb_free(adev, fence_offset);
- return -ENOMEM;
- }
+ r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
+ if (r)
+ goto error_unlock_free;
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
- api_status->api_completion_fence_addr = fence_gpu_addr;
+ api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
- amdgpu_ring_write_multiple(ring, pkt, ndw);
+ amdgpu_ring_write_multiple(ring, pkt, size / 4);
+
+ memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
+ mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
+ mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+ mes_status_pkt.api_status.api_completion_fence_addr =
+ ring->fence_drv.gpu_addr;
+ mes_status_pkt.api_status.api_completion_fence_value =
+ ++ring->fence_drv.sync_seq;
+
+ amdgpu_ring_write_multiple(ring, &mes_status_pkt,
+ sizeof(mes_status_pkt) / 4);
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
@@ -206,15 +216,16 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
if (misc_op_str)
- dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str);
+ dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
+ misc_op_str);
else if (op_str)
dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
else
- dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode);
+ dev_dbg(adev->dev, "MES msg=%d was emitted\n",
+ x_pkt->header.opcode);
- r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout);
- amdgpu_device_wb_free(adev, fence_offset);
- if (r < 1) {
+ r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ if (r < 1 || !*status_ptr) {
if (misc_op_str)
dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
@@ -229,10 +240,19 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
while (halt_if_hws_hang)
schedule();
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto error_wb_free;
}
+ amdgpu_device_wb_free(adev, status_offset);
return 0;
+
+error_unlock_free:
+ spin_unlock_irqrestore(&mes->ring_lock, flags);
+
+error_wb_free:
+ amdgpu_device_wb_free(adev, status_offset);
+ return r;
}
static int convert_to_mes_queue_type(int queue_type)
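The rework above drops the dedicated write-back fence in favor of a trailing QUERY_MES_STATUS packet: the status packet's completion fence reuses the ring's own sequence counter, and the original request's status word is checked separately after the wait (amdgpu_fence_wait_polling returns the remaining timeout, so r < 1 means it expired). Below is a minimal user-space model of that two-part check; it is a sketch, not the driver code, and fence_wait_polling is a stand-in for the kernel helper with the wrap-safe sequence compare it is assumed to use. The same rework is applied to mes_v12_0.c just after this.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int64_t fence_wait_polling(const volatile uint32_t *seq_reg,
				  uint32_t wait_seq, int64_t timeout_us)
{
	/* wrap-safe: treat the sequence distance as signed */
	while ((int32_t)(wait_seq - *seq_reg) > 0 && timeout_us > 0) {
		usleep(2);	/* the kernel side delays between reads too */
		timeout_us -= 2;
	}
	return timeout_us;	/* <1 means the wait timed out */
}

int main(void)
{
	volatile uint32_t seq_reg = 0;	/* ring fence location */
	volatile uint64_t status = 0;	/* per-request status word */

	/* pretend the firmware completed the request and wrote both words */
	status = 1;
	seq_reg = 1;

	if (fence_wait_polling(&seq_reg, 1, 3000000) < 1 || !status) {
		fprintf(stderr, "MES failed to respond\n");
		return 1;
	}
	puts("request completed");
	return 0;
}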
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index f18fdda023c9..106eef1ff5cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -144,18 +144,18 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
{
- int ndw = size / 4;
- signed long r;
- union MESAPI__MISC *x_pkt = pkt;
- struct MES_API_STATUS *api_status;
+ union MESAPI__QUERY_MES_STATUS mes_status_pkt;
+ signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
- unsigned long flags;
+ struct MES_API_STATUS *api_status;
+ union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
- signed long timeout = 3000000; /* 3000 ms */
- u32 fence_offset;
- u64 fence_gpu_addr;
- u64 *fence_ptr;
+ unsigned long flags;
+ u64 status_gpu_addr;
+ u32 status_offset;
+ u64 *status_ptr;
+ signed long r;
int ret;
if (x_pkt->header.opcode >= MES_SCH_API_MAX)
@@ -167,28 +167,38 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
/* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */
timeout = 15 * 600 * 1000;
}
- BUG_ON(size % 4 != 0);
- ret = amdgpu_device_wb_get(adev, &fence_offset);
+ ret = amdgpu_device_wb_get(adev, &status_offset);
if (ret)
return ret;
- fence_gpu_addr =
- adev->wb.gpu_addr + (fence_offset * 4);
- fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
- *fence_ptr = 0;
+
+ status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
+ status_ptr = (u64 *)&adev->wb.wb[status_offset];
+ *status_ptr = 0;
spin_lock_irqsave(&mes->ring_lock, flags);
- if (amdgpu_ring_alloc(ring, ndw)) {
- spin_unlock_irqrestore(&mes->ring_lock, flags);
- amdgpu_device_wb_free(adev, fence_offset);
- return -ENOMEM;
- }
+ r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
+ if (r)
+ goto error_unlock_free;
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
- api_status->api_completion_fence_addr = fence_gpu_addr;
+ api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
- amdgpu_ring_write_multiple(ring, pkt, ndw);
+ amdgpu_ring_write_multiple(ring, pkt, size / 4);
+
+ memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
+ mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
+ mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+ mes_status_pkt.api_status.api_completion_fence_addr =
+ ring->fence_drv.gpu_addr;
+ mes_status_pkt.api_status.api_completion_fence_value =
+ ++ring->fence_drv.sync_seq;
+
+ amdgpu_ring_write_multiple(ring, &mes_status_pkt,
+ sizeof(mes_status_pkt) / 4);
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
@@ -196,16 +206,17 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
misc_op_str = mes_v12_0_get_misc_op_string(x_pkt);
if (misc_op_str)
- dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str);
+ dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
+ misc_op_str);
else if (op_str)
dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
else
- dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode);
+ dev_dbg(adev->dev, "MES msg=%d was emitted\n",
+ x_pkt->header.opcode);
- r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout);
- amdgpu_device_wb_free(adev, fence_offset);
- if (r < 1) {
+ r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ if (r < 1 || !*status_ptr) {
if (misc_op_str)
dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
op_str, misc_op_str);
@@ -219,10 +230,19 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
while (halt_if_hws_hang)
schedule();
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto error_wb_free;
}
+ amdgpu_device_wb_free(adev, status_offset);
return 0;
+
+error_unlock_free:
+ spin_unlock_irqrestore(&mes->ring_lock, flags);
+
+error_wb_free:
+ amdgpu_device_wb_free(adev, status_offset);
+ return r;
}
static int convert_to_mes_queue_type(int queue_type)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 92432cd2c0c7..9689e2b5d4e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -544,7 +544,7 @@ static int mmhub_v1_7_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v1_7_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- int data, data1;
+ u32 data, data1;
if (amdgpu_sriov_vf(adev))
*flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 02fd45261399..a0cc8e218ca1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -671,7 +671,7 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- int data, data1;
+ u32 data, data1;
if (amdgpu_sriov_vf(adev))
*flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index 8928f9160822..b4ce3375d3fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -613,7 +613,7 @@ static int mmhub_v3_3_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v3_3_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- int data;
+ u32 data;
if (amdgpu_sriov_vf(adev))
*flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 1b7da4aff2b8..ff1b58e44689 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -657,7 +657,7 @@ static int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- int data, data1;
+ u32 data, data1;
if (amdgpu_sriov_vf(adev))
*flags = 0;
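The four mmhub hunks above are the same one-line fix: clockgating register snapshots were held in signed int, so right-shifting a raw value with bit 31 set sign-extends before any field test. A small stand-alone illustration of the hazard (contrived value, not a real MMHUB register; the shift behavior shown is what gcc/clang do for negative operands):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t s = (int32_t)0x80000000u;	/* register value, bit 31 set */
	uint32_t u = 0x80000000u;

	printf("signed   >> 16: 0x%08x\n", (uint32_t)(s >> 16));	/* 0xffff8000 */
	printf("unsigned >> 16: 0x%08x\n", u >> 16);			/* 0x00008000 */
	return 0;
}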
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index f4c47492e0cd..6b71ee85ee65 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -249,38 +249,30 @@ static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
return 0;
}
-static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+static void xgpu_ai_ready_to_reset(struct amdgpu_device *adev)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
- struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
- int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
-
- /* block amdgpu_gpu_recover till msg FLR COMPLETE received,
- * otherwise the mailbox msg will be ruined/reseted by
- * the VF FLR.
- */
- if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
- return;
-
- down_write(&adev->reset_domain->sem);
-
- amdgpu_virt_fini_data_exchange(adev);
-
xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+}
+static int xgpu_ai_wait_reset(struct amdgpu_device *adev)
+{
+ int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
- goto flr_done;
-
+ return 0;
msleep(10);
timeout -= 10;
} while (timeout > 1);
-
dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n");
+ return -ETIME;
+}
-flr_done:
- atomic_set(&adev->reset_domain->in_gpu_reset, 0);
- up_write(&adev->reset_domain->sem);
+static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ amdgpu_virt_fini_data_exchange(adev);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
@@ -417,7 +409,8 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
- .wait_reset = NULL,
+ .ready_to_reset = xgpu_ai_ready_to_reset,
+ .wait_reset = xgpu_ai_wait_reset,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.req_init_data = xgpu_ai_request_init_data,
.ras_poison_handler = xgpu_ai_ras_poison_handler,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 37b49a5ed2a1..22af30a15a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -282,38 +282,30 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
return 0;
}
-static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
+static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
- struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
- int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
-
- /* block amdgpu_gpu_recover till msg FLR COMPLETE received,
- * otherwise the mailbox msg will be ruined/reseted by
- * the VF FLR.
- */
- if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
- return;
-
- down_write(&adev->reset_domain->sem);
-
- amdgpu_virt_fini_data_exchange(adev);
-
xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+}
+static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
+{
+ int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
do {
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
- goto flr_done;
-
+ return 0;
msleep(10);
timeout -= 10;
} while (timeout > 1);
-
dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n");
+ return -ETIME;
+}
-flr_done:
- atomic_set(&adev->reset_domain->in_gpu_reset, 0);
- up_write(&adev->reset_domain->sem);
+static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ amdgpu_virt_fini_data_exchange(adev);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
@@ -455,7 +447,8 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
.req_init_data = xgpu_nv_request_init_data,
.reset_gpu = xgpu_nv_request_reset,
- .wait_reset = NULL,
+ .ready_to_reset = xgpu_nv_ready_to_reset,
+ .wait_reset = xgpu_nv_wait_reset,
.trans_msg = xgpu_nv_mailbox_trans_msg,
.ras_poison_handler = xgpu_nv_ras_poison_handler,
};
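With the FLR worker split, xgpu_{ai,nv}_ready_to_reset only signals the host and xgpu_{ai,nv}_wait_reset only polls for completion; the reset-domain locking the workers used to do is left to the common reset path. A compilable sketch of how a caller is presumably expected to drive the two new ops — host_flr is a hypothetical stand-in for that common code, only the callback names come from the patch:

#include <stdio.h>

struct amdgpu_device;			/* opaque here */

struct amdgpu_virt_ops {		/* only the two new callbacks */
	void (*ready_to_reset)(struct amdgpu_device *adev);
	int  (*wait_reset)(struct amdgpu_device *adev);
};

static void stub_ready_to_reset(struct amdgpu_device *adev)
{
	(void)adev;
	puts("IDH_READY_TO_RESET sent to host");
}

static int stub_wait_reset(struct amdgpu_device *adev)
{
	(void)adev;
	return 0;	/* host acked with IDH_FLR_NOTIFICATION_CMPL */
}

/* assumed shape of the common reset code that consumes the split ops */
static int host_flr(struct amdgpu_device *adev,
		    const struct amdgpu_virt_ops *ops)
{
	ops->ready_to_reset(adev);
	return ops->wait_reset(adev);
}

int main(void)
{
	const struct amdgpu_virt_ops ops = {
		.ready_to_reset = stub_ready_to_reset,
		.wait_reset     = stub_wait_reset,
	};

	return host_flr(NULL, &ops);
}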
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 78cd07744ebe..e1d63bed84bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -515,12 +515,6 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
- /* wait until RCV_MSG become 3 */
- if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
- pr_err("failed to receive FLR_CMPL\n");
- return;
- }
-
/* Trigger recovery due to world switch failure */
if (amdgpu_device_should_recover_gpu(adev)) {
struct amdgpu_reset_context reset_context;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 32cc60ce5521..8d80df94bd8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -414,6 +414,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
*/
+ amdgpu_ras_set_fed(adev, true);
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index cc0248efa6b6..4d33c95a5116 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -32,7 +32,9 @@
#include "mp/mp_14_0_2_sh_mask.h"
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -66,6 +68,9 @@ static int psp_v14_0_init_microcode(struct psp_context *psp)
err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index ac8a9b9b3e52..725392522267 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -127,7 +127,6 @@ static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err = 0, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
@@ -145,10 +144,11 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ "amdgpu/%s_sdma.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ "amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
@@ -169,7 +169,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
- pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
+ pr_err("sdma_v2_4: Failed to load firmware \"%s_sdma%s.bin\"\n",
+ chip_name, i == 0 ? "" : "1");
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
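Both SDMA hunks (here and in sdma_v3_0.c below) rely on amdgpu_ucode_request() having grown a printf-style signature, which removes the fixed 30-byte fw_name buffer and its silent-truncation risk. A user-space model of just the name construction — ucode_request is a stand-in, not the real helper, which also loads and validates the blob:

#include <stdarg.h>
#include <stdio.h>

static int ucode_request(char *buf, size_t len, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf, len, fmt, ap);
	va_end(ap);
	return (n < 0 || (size_t)n >= len) ? -1 : 0;	/* reject truncation */
}

int main(void)
{
	char name[64];

	if (!ucode_request(name, sizeof(name), "amdgpu/%s_sdma%s.bin",
			   "tonga", ""))
		puts(name);	/* amdgpu/tonga_sdma.bin */
	return 0;
}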
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b8ebdc4ae6f6..aa637541da58 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -267,7 +267,6 @@ static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err = 0, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
@@ -305,10 +304,11 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ "amdgpu/%s_sdma.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ "amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
@@ -327,7 +327,8 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
}
out:
if (err) {
- pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
+ pr_err("sdma_v3_0: Failed to load firmware \"%s_sdma%s.bin\"\n",
+ chip_name, i == 0 ? "" : "1");
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0_0_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0_0_pkt_open.h
index 6af23e7888ca..d8cf830916b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0_0_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0_0_pkt_open.h
@@ -91,6 +91,14 @@
#define SDMA_GCR_GLM_WB (1 << 4)
#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2)
#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0)
+
+#define SDMA_DCC_DATA_FORMAT(x) ((x) & 0x3f)
+#define SDMA_DCC_NUM_TYPE(x) (((x) & 0x7) << 9)
+#define SDMA_DCC_READ_CM(x) (((x) & 0x3) << 16)
+#define SDMA_DCC_WRITE_CM(x) (((x) & 0x3) << 18)
+#define SDMA_DCC_MAX_COM(x) (((x) & 0x3) << 24)
+#define SDMA_DCC_MAX_UCOM(x) (((x) & 0x1) << 26)
+
/*
** Definitions for SDMA_PKT_COPY_LINEAR packet
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index ab1dea77be6e..96514fd77e35 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1568,13 +1568,22 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
{
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
- SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0) |
+ SDMA_PKT_COPY_LINEAR_HEADER_CPV((copy_flags &
+ (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)) ? 1 : 0);
+
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+
+ if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
+ ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(4) | SDMA_DCC_NUM_TYPE(4) |
+ ((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
+ ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
+ SDMA_DCC_MAX_COM(1) | SDMA_DCC_MAX_UCOM(1);
}
/**
@@ -1603,7 +1612,6 @@ static const struct amdgpu_buffer_funcs sdma_v7_0_buffer_funcs = {
.copy_max_bytes = 0x400000,
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v7_0_emit_copy_buffer,
-
.fill_max_bytes = 0x400000,
.fill_num_dw = 5,
.emit_fill_buffer = sdma_v7_0_emit_fill_buffer,
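The new SDMA_DCC_* shifts from sdma_v6_0_0_pkt_open.h pack the compression metadata dword that sdma_v7_0_emit_copy_buffer now appends when a copy requests read-decompress or write-compress. A worked example of the packing, using the exact field values the patch hard-codes (the macros are copied from the hunk above):

#include <stdint.h>
#include <stdio.h>

#define SDMA_DCC_DATA_FORMAT(x) ((x) & 0x3f)
#define SDMA_DCC_NUM_TYPE(x)    (((x) & 0x7) << 9)
#define SDMA_DCC_READ_CM(x)     (((x) & 0x3) << 16)
#define SDMA_DCC_WRITE_CM(x)    (((x) & 0x3) << 18)
#define SDMA_DCC_MAX_COM(x)     (((x) & 0x3) << 24)
#define SDMA_DCC_MAX_UCOM(x)    (((x) & 0x1) << 26)

int main(void)
{
	/* read side decompressed (CM=2), write side compressed (CM=1) */
	uint32_t dw = SDMA_DCC_DATA_FORMAT(4) | SDMA_DCC_NUM_TYPE(4) |
		      SDMA_DCC_READ_CM(2) | SDMA_DCC_WRITE_CM(1) |
		      SDMA_DCC_MAX_COM(1) | SDMA_DCC_MAX_UCOM(1);

	printf("DCC dword = 0x%08x\n", dw);	/* 0x05060804 */
	return 0;
}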
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index cb253bd3a2a2..a280b9fecb77 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -202,24 +202,17 @@ static int vcn_v1_0_hw_init(void *handle)
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
}
ring = adev->jpeg.inst->ring_dec;
r = amdgpu_ring_test_helper(ring);
- if (r)
- goto done;
-
-done:
- if (!r)
- DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
return r;
}
@@ -2043,7 +2036,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
- DRM_INFO("VCN decode is enabled in VM mode\n");
}
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
@@ -2052,8 +2044,6 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
-
- DRM_INFO("VCN encode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index f18fd61c435e..d3d096909a7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -237,7 +237,7 @@ static int vcn_v2_0_hw_init(void *handle)
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
//Disable vcn decode for sriov
if (amdgpu_sriov_vf(adev))
@@ -247,15 +247,10 @@ static int vcn_v2_0_hw_init(void *handle)
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
}
-done:
- if (!r)
- DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
-
- return r;
+ return 0;
}
/**
@@ -2074,7 +2069,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
- DRM_INFO("VCN decode is enabled in VM mode\n");
}
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
@@ -2083,8 +2077,6 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
-
- DRM_INFO("VCN encode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index baec14bde2a2..96f60c303161 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -314,22 +314,17 @@ static int vcn_v2_5_hw_init(void *handle)
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
}
}
}
-done:
- if (!r)
- DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
-
return r;
}
@@ -1710,7 +1705,6 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
continue;
adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
adev->vcn.inst[i].ring_dec.me = i;
- DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
}
}
@@ -1725,7 +1719,6 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
- DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 6b31cf4b8aac..24f947751c46 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -303,7 +303,7 @@ static int vcn_v3_0_hw_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
r = vcn_v3_0_start_sriov(adev);
if (r)
- goto done;
+ return r;
/* initialize VCN dec and enc ring buffers */
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -348,24 +348,18 @@ static int vcn_v3_0_hw_init(void *handle)
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
}
}
}
return 0;
-done:
- if (!r)
- DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
-
- return r;
}
/**
@@ -2041,8 +2035,6 @@ static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
else
adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
adev->vcn.inst[i].ring_dec.me = i;
- DRM_INFO("VCN(%d) decode%s is enabled in VM mode\n", i,
- DEC_SW_RING_ENABLED?"(Software Ring)":"");
}
}
@@ -2058,8 +2050,6 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[j].me = i;
}
- if (adev->vcn.num_enc_rings > 0)
- DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index ac1b8ead03b3..f6d96a44d75f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -258,7 +258,7 @@ static int vcn_v4_0_hw_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_start_sriov(adev);
if (r)
- goto done;
+ return r;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -269,7 +269,6 @@ static int vcn_v4_0_hw_init(void *handle)
ring->wptr_old = 0;
vcn_v4_0_unified_ring_set_wptr(ring);
ring->sched.ready = true;
-
}
} else {
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -283,18 +282,11 @@ static int vcn_v4_0_hw_init(void *handle)
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
-
+ return r;
}
}
return 0;
-done:
- if (!r)
- DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
-
- return r;
}
/**
@@ -1900,8 +1892,6 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
adev->vcn.inst[i].ring_enc[0].funcs =
(const struct amdgpu_ring_funcs *)&vcn_v4_0_unified_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[0].me = i;
-
- DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 2279d8fce03d..f53054e39ebb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -210,7 +210,7 @@ static int vcn_v4_0_3_hw_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_3_start_sriov(adev);
if (r)
- goto done;
+ return r;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -246,15 +246,10 @@ static int vcn_v4_0_3_hw_init(void *handle)
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
}
}
-done:
- if (!r)
- DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully(under %s).\n",
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
-
return r;
}
@@ -1450,7 +1445,6 @@ static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
adev->vcn.inst[i].aid_id =
vcn_inst / adev->vcn.num_inst_per_aid;
}
- DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 81fb99729f37..f45495de6875 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -234,16 +234,10 @@ static int vcn_v4_0_5_hw_init(void *handle)
r = amdgpu_ring_test_helper(ring);
if (r)
- goto done;
+ return r;
}
return 0;
-done:
- if (!r)
- DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
-
- return r;
}
/**
@@ -1558,8 +1552,6 @@ static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev)
adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_5_unified_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[0].me = i;
-
- DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 665122d1bbbd..02f7ba8c93cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -1136,7 +1136,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x705d0000, 0x807c817c,
0x8070ff70, 0x00000080,
0xbf0a7b7c, 0xbf85fff8,
- 0xbf82013d, 0xbef4037e,
+ 0xbf82013f, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
@@ -1275,7 +1275,8 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x80788478, 0xbf8c0000,
0xb9eef815, 0xbefc036f,
0xbefe0370, 0xbeff0371,
- 0xb9f9f816, 0xb9fbf803,
+ 0xb9f9f816, 0xb9fb4803,
+ 0x907b8b7b, 0xb9fba2c3,
0xb9f3f801, 0xb96e3a05,
0x806e816e, 0xbf0d9972,
0xbf850002, 0x8f6e896e,
@@ -2544,7 +2545,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xe0704000, 0x705d0000,
0x807c817c, 0x8070ff70,
0x00000080, 0xbf0a7b7c,
- 0xbf85fff8, 0xbf820134,
+ 0xbf85fff8, 0xbf820136,
0xbef4037e, 0x8775ff7f,
0x0000ffff, 0x8875ff75,
0x00040000, 0xbef60380,
@@ -2683,7 +2684,8 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xf0000000, 0x80788478,
0xbf8c0000, 0xb9eef815,
0xbefc036f, 0xbefe0370,
- 0xbeff0371, 0xb9fbf803,
+ 0xbeff0371, 0xb9fb4803,
+ 0x907b8b7b, 0xb9fba2c3,
0xb9f3f801, 0xb96e3a05,
0x806e816e, 0xbf0d9972,
0xbf850002, 0x8f6e896e,
@@ -2981,7 +2983,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0x701d0000, 0x807d817d,
0x8070ff70, 0x00000080,
0xbf0a7b7d, 0xbfa2fff8,
- 0xbfa0013f, 0xbef4007e,
+ 0xbfa00143, 0xbef4007e,
0x8b75ff7f, 0x0000ffff,
0x8c75ff75, 0x00040000,
0xbef60080, 0xbef700ff,
@@ -3123,7 +3125,9 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0x80788478, 0xbf890000,
0xb96ef815, 0xbefd006f,
0xbefe0070, 0xbeff0071,
- 0xb97bf803, 0xb973f801,
+ 0xb97b4803, 0x857b8b7b,
+ 0xb97b22c3, 0x857b867b,
+ 0xb97b7443, 0xb973f801,
0xb8ee3b05, 0x806e816e,
0xbf0d9972, 0xbfa20002,
0x846e896e, 0xbfa00001,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index ac3702b8e3c4..44772eec9ef4 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -119,9 +119,12 @@ var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT = 7
var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+var SQ_WAVE_TRAPSTS_ILLEGAL_INST_SHIFT = 11
var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000
#if ASIC_FAMILY >= CHIP_PLUM_BONITO
+var SQ_WAVE_TRAPSTS_HOST_TRAP_SHIFT = 16
var SQ_WAVE_TRAPSTS_WAVE_START_MASK = 0x20000
+var SQ_WAVE_TRAPSTS_WAVE_START_SHIFT = 17
var SQ_WAVE_TRAPSTS_WAVE_END_MASK = 0x40000
var SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK = 0x100000
#endif
@@ -137,14 +140,23 @@ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000
var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
+var S_TRAPSTS_RESTORE_PART_1_SIZE = SQ_WAVE_TRAPSTS_SAVECTX_SHIFT
+var S_TRAPSTS_RESTORE_PART_2_SHIFT = SQ_WAVE_TRAPSTS_ILLEGAL_INST_SHIFT
+
#if ASIC_FAMILY < CHIP_PLUM_BONITO
var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+var S_TRAPSTS_RESTORE_PART_2_SIZE = 32 - S_TRAPSTS_RESTORE_PART_2_SHIFT
+var S_TRAPSTS_RESTORE_PART_3_SHIFT = 0
+var S_TRAPSTS_RESTORE_PART_3_SIZE = 0
#else
var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK |\
SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK |\
SQ_WAVE_TRAPSTS_WAVE_START_MASK |\
SQ_WAVE_TRAPSTS_WAVE_END_MASK |\
SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK
+var S_TRAPSTS_RESTORE_PART_2_SIZE = SQ_WAVE_TRAPSTS_HOST_TRAP_SHIFT - SQ_WAVE_TRAPSTS_ILLEGAL_INST_SHIFT
+var S_TRAPSTS_RESTORE_PART_3_SHIFT = SQ_WAVE_TRAPSTS_WAVE_START_SHIFT
+var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
#endif
var S_TRAPSTS_HWREG = HW_REG_TRAPSTS
var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_TRAPSTS_SAVECTX_MASK
@@ -157,6 +169,7 @@ var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
@@ -173,6 +186,11 @@ var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
+var S_TRAPSTS_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+var S_TRAPSTS_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var S_TRAPSTS_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var S_TRAPSTS_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
var BARRIER_STATE_SIGNAL_OFFSET = 16
var BARRIER_STATE_VALID_OFFSET = 0
#endif
@@ -1386,17 +1404,17 @@ L_SKIP_BARRIER_RESTORE:
s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
#endif
-#if ASIC_FAMILY < CHIP_GFX12
- s_setreg_b32 hwreg(S_TRAPSTS_HWREG), s_restore_trapsts
-#else
- // EXCP_FLAG_PRIV.SAVE_CONTEXT and HOST_TRAP may have changed.
+ // {TRAPSTS/EXCP_FLAG_PRIV}.SAVE_CONTEXT and HOST_TRAP may have changed.
// Only restore the other fields to avoid clobbering them.
- s_setreg_b32 hwreg(S_TRAPSTS_HWREG, 0, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT), s_restore_trapsts
- s_lshr_b32 s_restore_trapsts, s_restore_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
- s_setreg_b32 hwreg(S_TRAPSTS_HWREG, SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT, 1), s_restore_trapsts
- s_lshr_b32 s_restore_trapsts, s_restore_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
- s_setreg_b32 hwreg(S_TRAPSTS_HWREG, SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT, 32 - SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT), s_restore_trapsts
-#endif
+ s_setreg_b32 hwreg(S_TRAPSTS_HWREG, 0, S_TRAPSTS_RESTORE_PART_1_SIZE), s_restore_trapsts
+ s_lshr_b32 s_restore_trapsts, s_restore_trapsts, S_TRAPSTS_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_RESTORE_PART_2_SHIFT, S_TRAPSTS_RESTORE_PART_2_SIZE), s_restore_trapsts
+
+if S_TRAPSTS_RESTORE_PART_3_SIZE > 0
+ s_lshr_b32 s_restore_trapsts, s_restore_trapsts, S_TRAPSTS_RESTORE_PART_3_SHIFT - S_TRAPSTS_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_RESTORE_PART_3_SHIFT, S_TRAPSTS_RESTORE_PART_3_SIZE), s_restore_trapsts
+end
+
s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
// Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
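The restore now always goes field-by-field: part 1 covers the bits below SAVE_CONTEXT, part 2 the bits from ILLEGAL_INST up to (but excluding) HOST_TRAP, and part 3 everything from WAVE_START up, so SAVE_CONTEXT and HOST_TRAP keep whatever value they acquired while the wave was saved. A C model of the masking — the bit positions are illustrative gfx10-style values, not taken from the headers:

#include <stdint.h>
#include <stdio.h>

/* models s_setreg_b32 hwreg(reg, off, size): place val's low bits at off */
static uint32_t setreg(uint32_t reg, unsigned int off, unsigned int size,
		       uint32_t val)
{
	uint32_t mask = (size >= 32 ? ~0u : ((1u << size) - 1)) << off;

	return (reg & ~mask) | ((val << off) & mask);
}

int main(void)
{
	/* assume SAVE_CONTEXT at bit 10, ILLEGAL_INST at 11, no part 3 */
	unsigned int part1_size = 10, part2_shift = 11, part2_size = 32 - 11;
	uint32_t hw = 0, saved = 0xffffffff;

	hw = setreg(hw, 0, part1_size, saved);				/* bits 0..9 */
	hw = setreg(hw, part2_shift, part2_size, saved >> part2_shift);	/* bits 11..31 */

	printf("restored = 0x%08x (SAVE_CONTEXT bit left alone)\n", hw);
	return 0;	/* prints 0xfffffbff: bit 10 untouched */
}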
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 52be4e340fb1..6040ee7918bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -936,7 +936,6 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd,
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
kfd_smi_event_update_gpu_reset(node, false, reset_context);
- node->dqm->ops.pre_reset(node->dqm);
}
kgd2kfd_suspend(kfd, false);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 485e26abc619..d2fceb6f9802 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -35,6 +35,7 @@
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_reset.h"
#include "mes_v11_api_def.h"
#include "kfd_debug.h"
@@ -155,14 +156,7 @@ static void kfd_hws_hang(struct device_queue_manager *dqm)
/*
* Issue a GPU reset if HWS is unresponsive
*/
- dqm->is_hws_hang = true;
-
- /* It's possible we're detecting a HWS hang in the
- * middle of a GPU reset. No need to schedule another
- * reset in this case.
- */
- if (!dqm->is_resetting)
- schedule_work(&dqm->hw_exception_work);
+ schedule_work(&dqm->hw_exception_work);
}
static int convert_to_mes_queue_type(int queue_type)
@@ -194,7 +188,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
int r, queue_type;
uint64_t wptr_addr_off;
- if (dqm->is_hws_hang)
+ if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
@@ -245,6 +239,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
+ up_read(&adev->reset_domain->sem);
if (r) {
dev_err(adev->dev, "failed to add hardware queue to MES, doorbell=0x%x\n",
q->properties.doorbell_off);
@@ -262,7 +257,7 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
int r;
struct mes_remove_queue_input queue_input;
- if (dqm->is_hws_hang)
+ if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;
memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
@@ -272,6 +267,7 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
+ up_read(&adev->reset_domain->sem);
if (r) {
dev_err(adev->dev, "failed to remove hardware queue from MES, doorbell=0x%x\n",
@@ -1468,20 +1464,13 @@ static int stop_nocpsch(struct device_queue_manager *dqm)
}
if (dqm->dev->adev->asic_type == CHIP_HAWAII)
- pm_uninit(&dqm->packet_mgr, false);
+ pm_uninit(&dqm->packet_mgr);
dqm->sched_running = false;
dqm_unlock(dqm);
return 0;
}
-static void pre_reset(struct device_queue_manager *dqm)
-{
- dqm_lock(dqm);
- dqm->is_resetting = true;
- dqm_unlock(dqm);
-}
-
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q, const uint32_t *restore_sdma_id)
{
@@ -1669,8 +1658,6 @@ static int start_cpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);
/* clear hang status when driver try to start the hw scheduler */
- dqm->is_hws_hang = false;
- dqm->is_resetting = false;
dqm->sched_running = true;
if (!dqm->dev->kfd->shared_resources.enable_mes)
@@ -1700,7 +1687,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
fail_allocate_vidmem:
fail_set_sched_resources:
if (!dqm->dev->kfd->shared_resources.enable_mes)
- pm_uninit(&dqm->packet_mgr, false);
+ pm_uninit(&dqm->packet_mgr);
fail_packet_manager_init:
dqm_unlock(dqm);
return retval;
@@ -1708,22 +1695,17 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
- bool hanging;
-
dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
return 0;
}
- if (!dqm->is_hws_hang) {
- if (!dqm->dev->kfd->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
- else
- remove_all_queues_mes(dqm);
- }
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
+ else
+ remove_all_queues_mes(dqm);
- hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
if (!dqm->dev->kfd->shared_resources.enable_mes)
@@ -1731,7 +1713,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
if (!dqm->dev->kfd->shared_resources.enable_mes)
- pm_uninit(&dqm->packet_mgr, hanging);
+ pm_uninit(&dqm->packet_mgr);
dqm_unlock(dqm);
return 0;
@@ -1957,24 +1939,24 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
struct device *dev = dqm->dev->adev->dev;
struct mqd_manager *mqd_mgr;
- int retval = 0;
+ int retval;
if (!dqm->sched_running)
return 0;
- if (dqm->is_hws_hang || dqm->is_resetting)
- return -EIO;
if (!dqm->active_runlist)
- return retval;
+ return 0;
+ if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem))
+ return -EIO;
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
if (retval)
- return retval;
+ goto out;
}
retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
if (retval)
- return retval;
+ goto out;
*dqm->fence_addr = KFD_FENCE_INIT;
pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
@@ -1985,7 +1967,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (retval) {
dev_err(dev, "The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
kfd_hws_hang(dqm);
- return retval;
+ goto out;
}
/* In the current MEC firmware implementation, if compute queue
@@ -2001,7 +1983,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
while (halt_if_hws_hang)
schedule();
kfd_hws_hang(dqm);
- return -ETIME;
+ retval = -ETIME;
+ goto out;
}
/* We need to reset the grace period value for this device */
@@ -2014,6 +1997,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
pm_release_ib(&dqm->packet_mgr);
dqm->active_runlist = false;
+out:
+ up_read(&dqm->dev->adev->reset_domain->sem);
return retval;
}
@@ -2040,13 +2025,13 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
{
int retval;
- if (dqm->is_hws_hang)
+ if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem))
return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
- if (retval)
- return retval;
-
- return map_queues_cpsch(dqm);
+ if (!retval)
+ retval = map_queues_cpsch(dqm);
+ up_read(&dqm->dev->adev->reset_domain->sem);
+ return retval;
}
static int wait_on_destroy_queue(struct device_queue_manager *dqm,
@@ -2427,10 +2412,12 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
- if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
+ if ((retval || qpd->reset_wavefronts) &&
+ down_read_trylock(&dqm->dev->adev->reset_domain->sem)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
qpd->reset_wavefronts = false;
+ up_read(&dqm->dev->adev->reset_domain->sem);
}
/* Lastly, free mqd resources.
@@ -2537,7 +2524,6 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
- dqm->ops.pre_reset = pre_reset;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
@@ -2558,7 +2544,6 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
/* initialize dqm for no cp scheduling */
dqm->ops.start = start_nocpsch;
dqm->ops.stop = stop_nocpsch;
- dqm->ops.pre_reset = pre_reset;
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
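The recurring pattern in this file is the real point of the series: the is_hws_hang/is_resetting flags are replaced by down_read_trylock() on the reset domain semaphore, so any path that would talk to a hung or resetting scheduler fails fast with -EIO while an in-flight reset holds the semaphore for write. A user-space model of the idea with a pthread rwlock (a sketch, not the kernel locking; build with -lpthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;

/* stands in for add_queue_mes()/remove_queue_mes()/unmap_queues_cpsch() */
static int touch_hw_scheduler(void)
{
	if (pthread_rwlock_tryrdlock(&reset_sem))
		return -EIO;		/* a reset holds the sem for write */

	puts("submitted to HWS");	/* safe: no reset can start now */
	pthread_rwlock_unlock(&reset_sem);
	return 0;
}

int main(void)
{
	touch_hw_scheduler();				/* succeeds */

	pthread_rwlock_wrlock(&reset_sem);		/* GPU reset begins */
	printf("during reset: %d\n", touch_hw_scheduler());	/* -5 (-EIO) */
	pthread_rwlock_unlock(&reset_sem);
	return 0;
}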
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index fcc0ee67f544..3b9b8eabaacc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -152,7 +152,6 @@ struct device_queue_manager_ops {
int (*initialize)(struct device_queue_manager *dqm);
int (*start)(struct device_queue_manager *dqm);
int (*stop)(struct device_queue_manager *dqm);
- void (*pre_reset)(struct device_queue_manager *dqm);
void (*uninitialize)(struct device_queue_manager *dqm);
int (*create_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index e1c21d250611..816800555f7f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -164,7 +164,10 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
block = AMDGPU_RAS_BLOCK__GFX;
- reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
case SOC15_IH_CLIENTID_VMC:
case SOC15_IH_CLIENTID_VMC1:
@@ -177,7 +180,10 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
- reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
default:
dev_warn(dev->adev->dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 32c926986dbb..3ea75a9d86ec 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -32,6 +32,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers.h"
#include "kfd_pm4_opcodes.h"
+#include "amdgpu_reset.h"
#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
@@ -196,15 +197,17 @@ err_get_kernel_doorbell:
}
/* Uninitialize a kernel queue and free all its memory usages. */
-static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
+static void kq_uninitialize(struct kernel_queue *kq)
{
- if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging)
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && down_read_trylock(&kq->dev->adev->reset_domain->sem)) {
kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
kq->queue->pipe,
kq->queue->queue);
+ up_read(&kq->dev->adev->reset_domain->sem);
+ }
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
@@ -344,9 +347,9 @@ struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
return NULL;
}
-void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
+void kernel_queue_uninit(struct kernel_queue *kq)
{
- kq_uninitialize(kq, hanging);
+ kq_uninitialize(kq);
kfree(kq);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 7332ad94eab8..a05d5c1097a8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -263,10 +263,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
return 0;
}
-void pm_uninit(struct packet_manager *pm, bool hanging)
+void pm_uninit(struct packet_manager *pm)
{
mutex_destroy(&pm->lock);
- kernel_queue_uninit(pm->priv_queue, hanging);
+ kernel_queue_uninit(pm->priv_queue);
pm->priv_queue = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index c51e908f6f19..2b3ec92981e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1301,7 +1301,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type);
-void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
+void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
/* Process Queue Manager */
@@ -1407,7 +1407,7 @@ extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
-void pm_uninit(struct packet_manager *pm, bool hanging);
+void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 6bf79c435f2e..21f5a1fb3bf8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -28,6 +28,7 @@
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_reset.h"
static inline struct process_queue_node *get_queue_by_qid(
struct process_queue_manager *pqm, unsigned int qid)
@@ -87,8 +88,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
return;
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
- if (dev->kfd->shared_resources.enable_mes)
- amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
+ if (dev->kfd->shared_resources.enable_mes &&
+ down_read_trylock(&dev->adev->reset_domain->sem)) {
+ amdgpu_mes_flush_shader_debugger(dev->adev,
+ pdd->proc_ctx_gpu_addr);
+ up_read(&dev->adev->reset_domain->sem);
+ }
pdd->already_dequeued = true;
}
@@ -434,7 +439,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
err_create_queue:
uninit_queue(q);
if (kq)
- kernel_queue_uninit(kq, false);
+ kernel_queue_uninit(kq);
kfree(pqn);
err_allocate_pqn:
/* check if queues list is empty unregister process from device */
@@ -481,7 +486,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
/* destroy kernel queue (DIQ) */
dqm = pqn->kq->dev->dqm;
dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
- kernel_queue_uninit(pqn->kq, false);
+ kernel_queue_uninit(pqn->kq);
}
if (pqn->q) {
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 8297fbce7749..89d605de0595 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -37,6 +37,13 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dpp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hubbub
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dccg
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hubp
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dio
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dwb
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hpo
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mmhubbub
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mpc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/opp
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/pg
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ac18cddeb191..c96407379224 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -77,9 +77,11 @@
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
+#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>
+#include <linux/sort.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -375,6 +377,20 @@ static inline void reverse_planes_order(struct dc_surface_update *array_of_surfa
swap(array_of_surface_update[i], array_of_surface_update[j]);
}
+/*
+ * DC will program planes with their z-order determined by their ordering
+ * in the dc_surface_updates array. This comparator is used to sort them
+ * by descending zpos.
+ */
+static int dm_plane_layer_index_cmp(const void *a, const void *b)
+{
+ const struct dc_surface_update *sa = (struct dc_surface_update *)a;
+ const struct dc_surface_update *sb = (struct dc_surface_update *)b;
+
+ /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
+ return sb->surface->layer_index - sa->surface->layer_index;
+}
+
/**
* update_planes_and_stream_adapter() - Send planes to be updated in DC
*
@@ -399,7 +415,8 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
struct dc_stream_update *stream_update,
struct dc_surface_update *array_of_surface_update)
{
- reverse_planes_order(array_of_surface_update, planes_count);
+ sort(array_of_surface_update, planes_count,
+ sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
/*
* Previous frame finished and HW is ready for optimization.
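Replacing reverse_planes_order() with sort() makes the DC plane order follow normalized zpos rather than assuming the array arrives pre-ordered. The comparator logic from the patch, exercised stand-alone with qsort() — the structs are reduced stand-ins with only the field that matters:

#include <stdio.h>
#include <stdlib.h>

struct surface { int layer_index; };		/* normalized zpos */
struct surface_update { struct surface *surface; };

static int layer_index_cmp(const void *a, const void *b)
{
	const struct surface_update *sa = a;
	const struct surface_update *sb = b;

	/* descending layer_index: highest zpos is programmed first */
	return sb->surface->layer_index - sa->surface->layer_index;
}

int main(void)
{
	struct surface s[3] = { {0}, {2}, {1} };
	struct surface_update u[3] = { {&s[0]}, {&s[1]}, {&s[2]} };

	qsort(u, 3, sizeof(u[0]), layer_index_cmp);
	for (int i = 0; i < 3; i++)
		printf("%d ", u[i].surface->layer_index);	/* 2 1 0 */
	putchar('\n');
	return 0;
}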
@@ -774,9 +791,9 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
- DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
- else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
else
DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
notify->type, link_index);
@@ -788,10 +805,13 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
drm_connector_list_iter_end(&iter);
if (hpd_aconnector) {
- if (notify->type == DMUB_NOTIFICATION_HPD)
+ if (notify->type == DMUB_NOTIFICATION_HPD) {
+ if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
+ DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
- else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
+ }
}
}
@@ -859,7 +879,31 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct dmcub_trace_buf_entry entry = { 0 };
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
- struct dc_link *plink = NULL;
+ static const char *const event_type[] = {
+ "NO_DATA",
+ "AUX_REPLY",
+ "HPD",
+ "HPD_IRQ",
+ "SET_CONFIGC_REPLY",
+ "DPIA_NOTIFICATION",
+ };
+
+ do {
+ if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+ trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+ entry.param0, entry.param1);
+
+ DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+ } else
+ break;
+
+ count++;
+
+ } while (count <= DMUB_TRACE_MAX_READ);
+
+ if (count > DMUB_TRACE_MAX_READ)
+ DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -871,7 +915,8 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
continue;
}
if (!dm->dmub_callback[notify.type]) {
- DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+ DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
+ event_type[notify.type]);
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
@@ -889,37 +934,12 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
dmub_hpd_wrk->adev = adev;
- if (notify.type == DMUB_NOTIFICATION_HPD) {
- plink = adev->dm.dc->links[notify.link_index];
- if (plink) {
- plink->hpd_status =
- notify.hpd_status == DP_HPD_PLUG;
- }
- }
queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
} else {
dm->dmub_callback[notify.type](adev, &notify);
}
} while (notify.pending_notification);
}
-
-
- do {
- if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
- trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
- entry.param0, entry.param1);
-
- DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
- entry.trace_code, entry.tick_count, entry.param0, entry.param1);
- } else
- break;
-
- count++;
-
- } while (count <= DMUB_TRACE_MAX_READ);
-
- if (count > DMUB_TRACE_MAX_READ)
- DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
static int dm_set_clockgating_state(void *handle,
@@ -957,8 +977,8 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
list_for_each_entry(mode, &connector->modes, head) {
- if (max_size < mode->htotal * mode->vtotal)
- max_size = mode->htotal * mode->vtotal;
+ if (max_size < (unsigned long) mode->htotal * mode->vtotal)
+ max_size = (unsigned long) mode->htotal * mode->vtotal;
}
if (max_size) {
@@ -2855,7 +2875,8 @@ static int dm_suspend(void *handle)
dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
- dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+ if (dm->cached_dc_state)
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
amdgpu_dm_commit_zero_streams(dm->dc);
@@ -3159,7 +3180,7 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created end sink
* MST connectors, should be skipped
*/
- if (aconnector && aconnector->mst_root)
+ if (aconnector->mst_root)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -4571,6 +4592,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
+ struct amdgpu_dm_backlight_caps caps = { 0 };
char bl_name[16];
if (aconnector->bl_idx == -1)
@@ -4583,8 +4605,16 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
return;
}
+ amdgpu_acpi_get_backlight_caps(&caps);
+ if (caps.caps_valid) {
+ if (power_supply_is_system_supplied() > 0)
+ props.brightness = caps.ac_level;
+ else
+ props.brightness = caps.dc_level;
+ } else
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
+
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
- props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -6392,13 +6422,13 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ if (sink->sink_signal == SIGNAL_TYPE_EDP &&
!aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
- } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
@@ -7073,7 +7103,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
aconnector->dc_sink = aconnector->dc_link->local_sink ?
aconnector->dc_link->local_sink :
aconnector->dc_em_sink;
- dc_sink_retain(aconnector->dc_sink);
+ if (aconnector->dc_sink)
+ dc_sink_retain(aconnector->dc_sink);
}
}
@@ -7900,7 +7931,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
+ if (encoder)
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);
@@ -8745,8 +8777,24 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* Disable the cursor first if we're disabling all the planes.
* It'll remain on the screen after the planes are re-enabled
* if we don't.
+ *
+ * If the cursor is transitioning from native to overlay mode, the
+ * native cursor needs to be disabled first.
*/
- if (acrtc_state->active_planes == 0)
+ if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE &&
+ dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
+ struct dc_cursor_position cursor_position = {0};
+
+ if (!dc_stream_set_cursor_position(acrtc_state->stream,
+ &cursor_position))
+ drm_err(dev, "DC failed to disable native cursor\n");
+
+ bundle->stream_update.cursor_position =
+ &acrtc_state->stream->cursor_position;
+ }
+
+ if (acrtc_state->active_planes == 0 &&
+ dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
amdgpu_dm_commit_cursors(state);
/* update planes when needed */
@@ -8760,7 +8808,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
/* Cursor plane is handled after stream updates */
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
if ((fb && crtc == pcrtc) ||
(old_plane_state->fb && old_plane_state->crtc == pcrtc)) {
cursor_update = true;
@@ -9116,7 +9165,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* to be disabling a single plane - those pipes are being disabled.
*/
if (acrtc_state->active_planes &&
- (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0))
+ (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) &&
+ acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
amdgpu_dm_commit_cursors(state);
cleanup:
@@ -9744,6 +9794,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
+ sort(dummy_updates, status->plane_count,
+ sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
mutex_lock(&dm->dc_lock);
dc_exit_ips_for_hw_access(dm->dc);
@@ -10434,7 +10486,8 @@ static bool should_reset_plane(struct drm_atomic_state *state,
{
struct drm_plane *other;
struct drm_plane_state *old_other_state, *new_other_state;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
struct amdgpu_device *adev = drm_to_adev(plane->dev);
int i;
@@ -10456,14 +10509,38 @@ static bool should_reset_plane(struct drm_atomic_state *state,
new_crtc_state =
drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+ old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, old_plane_state->crtc);
if (!new_crtc_state)
return true;
+ /*
+ * A change in cursor mode means a new dc pipe needs to be acquired or
+ * released from the state
+ */
+ old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ old_dm_crtc_state != NULL &&
+ old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) {
+ return true;
+ }
+
/* CRTC Degamma changes currently require us to recreate planes. */
if (new_crtc_state->color_mgmt_changed)
return true;
+ /*
+ * On zpos change, planes need to be reordered by removing and re-adding
+ * them one by one to the dc state, in order of descending zpos.
+ *
+ * TODO: We can likely skip bandwidth validation if the only thing that
+ * changed about the plane was its z-ordering.
+ */
+ if (new_crtc_state->zpos_changed)
+ return true;
+
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
return true;
@@ -10611,6 +10688,68 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
return 0;
}
+/*
+ * Helper function for checking the cursor in native mode
+ */
+static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc,
+ struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state,
+ bool enable)
+{
+
+ struct amdgpu_crtc *new_acrtc;
+ int ret;
+
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ if (new_plane_state->fb) {
+ ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
+ new_plane_state->fb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool dm_should_update_native_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *old_plane_crtc,
+ struct drm_crtc *new_plane_crtc,
+ bool enable)
+{
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+
+ if (!enable) {
+ if (old_plane_crtc == NULL)
+ return true;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
+ } else {
+ if (new_plane_crtc == NULL)
+ return true;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
+ }
+}
+
static int dm_update_plane_state(struct dc *dc,
struct drm_atomic_state *state,
struct drm_plane *plane,
@@ -10626,8 +10765,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
- struct amdgpu_crtc *new_acrtc;
- bool needs_reset;
+ bool needs_reset, update_native_cursor;
int ret = 0;
@@ -10636,24 +10774,16 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state = to_dm_plane_state(new_plane_state);
dm_old_plane_state = to_dm_plane_state(old_plane_state);
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- if (!enable || !new_plane_crtc ||
- drm_atomic_plane_disabling(plane->state, new_plane_state))
- return 0;
-
- new_acrtc = to_amdgpu_crtc(new_plane_crtc);
-
- if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
- DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
- return -EINVAL;
- }
+ update_native_cursor = dm_should_update_native_cursor(state,
+ old_plane_crtc,
+ new_plane_crtc,
+ enable);
- if (new_plane_state->fb) {
- ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
- new_plane_state->fb);
- if (ret)
- return ret;
- }
+ if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) {
+ ret = dm_check_native_cursor_state(new_plane_crtc, plane,
+ new_plane_state, enable);
+ if (ret)
+ return ret;
return 0;
}
@@ -10719,20 +10849,14 @@ static int dm_update_plane_state(struct dc *dc,
ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
- return ret;
+ goto out;
WARN_ON(dm_new_plane_state->dc_state);
dc_new_plane_state = dc_create_plane_state(dc);
- if (!dc_new_plane_state)
- return -ENOMEM;
-
- /* Block top most plane from being a video plane */
- if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
- if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
- return -EINVAL;
-
- *is_top_most_overlay = false;
+ if (!dc_new_plane_state) {
+ ret = -ENOMEM;
+ goto out;
}
DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
@@ -10745,13 +10869,13 @@ static int dm_update_plane_state(struct dc *dc,
new_crtc_state);
if (ret) {
dc_plane_state_release(dc_new_plane_state);
- return ret;
+ goto out;
}
ret = dm_atomic_get_state(state, &dm_state);
if (ret) {
dc_plane_state_release(dc_new_plane_state);
- return ret;
+ goto out;
}
/*
@@ -10768,7 +10892,8 @@ static int dm_update_plane_state(struct dc *dc,
dm_state->context)) {
dc_plane_state_release(dc_new_plane_state);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
dm_new_plane_state->dc_state = dc_new_plane_state;
@@ -10783,6 +10908,16 @@ static int dm_update_plane_state(struct dc *dc,
*lock_and_validation_needed = true;
}
+out:
+ /* If enabling cursor overlay failed, attempt fallback to native mode */
+ if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) {
+ ret = dm_check_native_cursor_state(new_plane_crtc, plane,
+ new_plane_state, enable);
+ if (ret)
+ return ret;
+
+ dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE;
+ }
return ret;
}
@@ -10816,99 +10951,64 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
}
-static int dm_check_crtc_cursor(struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- struct drm_crtc_state *new_crtc_state)
+/*
+ * The normalized_zpos value cannot be used by this iterator directly. It's only
+ * calculated for enabled planes, potentially causing normalized_zpos collisions
+ * between enabled/disabled planes in the atomic state. We need a unique value
+ * so that the iterator will not generate the same object twice, or loop
+ * indefinitely.
+ */
+static inline struct __drm_planes_state *__get_next_zpos(
+ struct drm_atomic_state *state,
+ struct __drm_planes_state *prev)
{
- struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane_state *new_cursor_state, *new_underlying_state;
- int i;
- int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
- bool any_relevant_change = false;
-
- /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
- * cursor per pipe but it's going to inherit the scaling and
- * positioning from the underlying pipe. Check the cursor plane's
- * blending properties match the underlying planes'.
- */
-
- /* If no plane was enabled or changed scaling, no need to check again */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
- int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
-
- if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
- continue;
-
- if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
- any_relevant_change = true;
- break;
- }
-
- if (new_plane_state->fb == old_plane_state->fb &&
- new_plane_state->crtc_w == old_plane_state->crtc_w &&
- new_plane_state->crtc_h == old_plane_state->crtc_h)
- continue;
-
- dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
- dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+ unsigned int highest_zpos = 0, prev_zpos = 256;
+ uint32_t highest_id = 0, prev_id = UINT_MAX;
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ int i, highest_i = -1;
- if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
- any_relevant_change = true;
- break;
- }
+ if (prev != NULL) {
+ prev_zpos = prev->new_state->zpos;
+ prev_id = prev->ptr->base.id;
}
- if (!any_relevant_change)
- return 0;
-
- new_cursor_state = drm_atomic_get_plane_state(state, cursor);
- if (IS_ERR(new_cursor_state))
- return PTR_ERR(new_cursor_state);
-
- if (!new_cursor_state->fb)
- return 0;
-
- dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
-
- /* Need to check all enabled planes, even if this commit doesn't change
- * their state
- */
- i = drm_atomic_add_affected_planes(state, crtc);
- if (i)
- return i;
-
- for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
- /* Narrow down to non-cursor planes on the same CRTC as the cursor */
- if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ /* Skip planes with higher zpos than the previously returned */
+ if (new_plane_state->zpos > prev_zpos ||
+ (new_plane_state->zpos == prev_zpos &&
+ plane->base.id >= prev_id))
continue;
- /* Ignore disabled planes */
- if (!new_underlying_state->fb)
- continue;
-
- dm_get_plane_scale(new_underlying_state,
- &underlying_scale_w, &underlying_scale_h);
-
- if (cursor_scale_w != underlying_scale_w ||
- cursor_scale_h != underlying_scale_h) {
- drm_dbg_atomic(crtc->dev,
- "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
- cursor->base.id, cursor->name, underlying->base.id, underlying->name);
- return -EINVAL;
+ /* Save the index of the plane with highest zpos */
+ if (new_plane_state->zpos > highest_zpos ||
+ (new_plane_state->zpos == highest_zpos &&
+ plane->base.id > highest_id)) {
+ highest_zpos = new_plane_state->zpos;
+ highest_id = plane->base.id;
+ highest_i = i;
}
-
- /* If this plane covers the whole CRTC, no need to check planes underneath */
- if (new_underlying_state->crtc_x <= 0 &&
- new_underlying_state->crtc_y <= 0 &&
- new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
- new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
- break;
}
- return 0;
+ if (highest_i < 0)
+ return NULL;
+
+ return &state->planes[highest_i];
}
+/*
+ * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
+ * by descending zpos, as read from the new plane state. This is the same
+ * ordering as defined by drm_atomic_normalize_zpos().
+ */
+#define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
+ for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
+ __i != NULL; __i = __get_next_zpos((__state), __i)) \
+ for_each_if(((plane) = __i->ptr, \
+ (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
+ (old_plane_state) = __i->old_state, \
+ (new_plane_state) = __i->new_state, 1))
+
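A standalone model of the selection rule may help: each step returns the
entry with the highest (zpos, object id) pair strictly below the previously
returned pair, so every entry is visited exactly once even when zpos values
collide. The struct and values below are hypothetical simplifications of the
DRM plane-state arrays:

	#include <stdio.h>

	struct entry { unsigned int zpos; unsigned int id; };

	/* Mirrors __get_next_zpos()'s rule: highest (zpos, id) strictly
	 * below (prev_zpos, prev_id), or -1 when the walk is done. */
	static int next_desc(const struct entry *e, int n,
			     unsigned int prev_zpos, unsigned int prev_id)
	{
		unsigned int hi_zpos = 0, hi_id = 0;
		int i, hi = -1;

		for (i = 0; i < n; i++) {
			if (e[i].zpos > prev_zpos ||
			    (e[i].zpos == prev_zpos && e[i].id >= prev_id))
				continue;
			if (hi < 0 || e[i].zpos > hi_zpos ||
			    (e[i].zpos == hi_zpos && e[i].id > hi_id)) {
				hi_zpos = e[i].zpos;
				hi_id = e[i].id;
				hi = i;
			}
		}
		return hi;
	}

	int main(void)
	{
		/* Two planes share zpos 1; the id tiebreak keeps them distinct */
		struct entry e[] = { {1, 31}, {2, 33}, {1, 35} };
		unsigned int pz = 256, pid = ~0u;
		int i;

		/* Visits (2,33), then (1,35), then (1,31) */
		while ((i = next_desc(e, 3, pz, pid)) >= 0) {
			printf("zpos=%u id=%u\n", e[i].zpos, e[i].id);
			pz = e[i].zpos;
			pid = e[i].id;
		}
		return 0;
	}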
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
@@ -10940,6 +11040,165 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
/**
+ * DOC: Cursor Modes - Native vs Overlay
+ *
+ * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
+ * plane. It does not require a dedicated hw plane to enable, but it is
+ * subject to the same z-order and scaling as the hw plane. It also has format
+ * restrictions; an RGB cursor in native mode cannot be enabled within a
+ * non-RGB hw plane.
+ *
+ * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
+ * own scaling and z-pos. It also has no blending restrictions. This lends
+ * itself to cursor behavior more akin to a DRM client's expectations. However,
+ * it does occupy an extra DCN plane, and therefore will only be used if a DCN
+ * plane is available.
+ */
+
+/**
+ * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
+ * @adev: amdgpu device
+ * @state: DRM atomic state
+ * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
+ * @cursor_mode: Returns the required cursor mode on dm_crtc_state
+ *
+ * Get whether the cursor should be enabled in native or overlay mode on the
+ * dm_crtc_state.
+ *
+ * The cursor should be enabled in overlay mode if there exists an underlying
+ * plane - on which the cursor may be blended - that is either YUV formatted, or
+ * scaled differently from the cursor.
+ *
+ * Since zpos info is required, drm_atomic_normalize_zpos must be called before
+ * calling this function.
+ *
+ * Return: 0 on success, or an error code if getting the cursor plane state
+ * failed.
+ */
+static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
+ struct drm_atomic_state *state,
+ struct dm_crtc_state *dm_crtc_state,
+ enum amdgpu_dm_cursor_mode *cursor_mode)
+{
+ struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
+ struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
+ struct drm_plane *plane;
+ bool consider_mode_change = false;
+ bool entire_crtc_covered = false;
+ bool cursor_changed = false;
+ int underlying_scale_w, underlying_scale_h;
+ int cursor_scale_w, cursor_scale_h;
+ int i;
+
+ /* Overlay cursor not supported on HW before DCN */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0) {
+ *cursor_mode = DM_CURSOR_NATIVE_MODE;
+ return 0;
+ }
+
+ /* Init cursor_mode to be the same as current */
+ *cursor_mode = dm_crtc_state->cursor_mode;
+
+ /*
+ * Cursor mode can change if a plane's format or scale changes, the plane
+ * is enabled/disabled, or its z-order changes.
+ */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
+ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+ /* Only care about planes on this CRTC */
+ if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
+ continue;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ cursor_changed = true;
+
+ if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
+ drm_atomic_plane_disabling(old_plane_state, plane_state) ||
+ old_plane_state->fb->format != plane_state->fb->format) {
+ consider_mode_change = true;
+ break;
+ }
+
+ dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
+ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+ consider_mode_change = true;
+ break;
+ }
+ }
+
+ if (!consider_mode_change && !crtc_state->zpos_changed)
+ return 0;
+
+ /*
+ * If the cursor is neither changed nor enabled on this CRTC, there is
+ * no need to set its mode. This avoids needlessly locking the cursor
+ * state.
+ */
+ if (!cursor_changed &&
+ !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) {
+ return 0;
+ }
+
+ cursor_state = drm_atomic_get_plane_state(state,
+ crtc_state->crtc->cursor);
+ if (IS_ERR(cursor_state))
+ return PTR_ERR(cursor_state);
+
+ /* Cursor is disabled */
+ if (!cursor_state->fb)
+ return 0;
+
+ /* For all planes in descending z-order (all of which are below cursor
+ * as per zpos definitions), check their scaling and format
+ */
+ for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) {
+
+ /* Only care about non-cursor planes on this CRTC */
+ if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 ||
+ plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ /* Underlying plane is YUV format - use overlay cursor */
+ if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
+ *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+ return 0;
+ }
+
+ dm_get_plane_scale(plane_state,
+ &underlying_scale_w, &underlying_scale_h);
+ dm_get_plane_scale(cursor_state,
+ &cursor_scale_w, &cursor_scale_h);
+
+ /* Underlying plane has different scale - use overlay cursor */
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+ *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+ return 0;
+ }
+
+ /* If this plane covers the whole CRTC, no need to check planes underneath */
+ if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 &&
+ plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay &&
+ plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) {
+ entire_crtc_covered = true;
+ break;
+ }
+ }
+
+ /* If planes do not cover the entire CRTC, use overlay mode to enable
+ * cursor over holes
+ */
+ if (entire_crtc_covered)
+ *cursor_mode = DM_CURSOR_NATIVE_MODE;
+ else
+ *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+
+ return 0;
+}
+
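Reduced to its core rule, the selection above amounts to this sketch (a
hypothetical simplification with flattened inputs; the real function walks
DRM atomic state, handles plane-state locking, and short-circuits to native
mode on pre-DCN hardware):

	enum cursor_mode { NATIVE, OVERLAY };

	struct plane_info {
		int is_yuv;		/* underlying plane uses a YUV format */
		int scale_w, scale_h;	/* crtc size * 1000 / src size */
	};

	static enum cursor_mode pick_cursor_mode(const struct plane_info *p,
						 int n, int cur_w, int cur_h,
						 int crtc_fully_covered)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (p[i].is_yuv)
				return OVERLAY;	/* can't blend an RGB cursor into YUV */
			if (p[i].scale_w != cur_w || p[i].scale_h != cur_h)
				return OVERLAY;	/* cursor would inherit the wrong scale */
		}
		/* Holes in coverage: only an overlay cursor can sit over them */
		return crtc_fully_covered ? NATIVE : OVERLAY;
	}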
+/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
* @dev: The DRM device
@@ -11108,8 +11367,23 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
+ /*
+ * Determine whether cursors on each CRTC should be enabled in native or
+ * overlay mode.
+ */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
+ &dm_new_crtc_state->cursor_mode);
+ if (ret) {
+ drm_dbg(dev, "Failed to determine cursor mode\n");
+ goto fail;
+ }
+ }
+
/* Remove existing planes if they are modified */
- for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
if (old_plane_state->fb && new_plane_state->fb &&
get_mem_type(old_plane_state->fb) !=
get_mem_type(new_plane_state->fb))
@@ -11154,7 +11428,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
/* Add new/modified planes */
- for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
new_plane_state,
@@ -11188,11 +11462,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
}
- /* Check cursor planes scaling */
+ /* Check cursor planes restrictions */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
+ enum amdgpu_dm_cursor_mode required_cursor_mode;
+
+ /* Overlay cursor not subject to native cursor restrictions */
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+ continue;
+
+ /* If HW can only do native cursor, check restrictions again */
+ ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
+ &required_cursor_mode);
+
if (ret) {
- drm_dbg_atomic(dev, "dm_check_crtc_cursor() failed\n");
+ drm_dbg_driver(crtc->dev,
+ "[CRTC:%d:%s] Checking cursor mode failed\n",
+ crtc->base.id, crtc->name);
+ goto fail;
+ } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
+ drm_dbg_driver(crtc->dev,
+ "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
goto fail;
}
}
@@ -11806,6 +12098,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
mutex_unlock(&adev->dm.dc_lock);
}
+static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
+ dc_exit_ips_for_hw_access(dc);
+}
+
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
u32 value, const char *func_name)
{
@@ -11816,6 +12114,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
return;
}
#endif
+
+ amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
cgs_write_register(ctx->cgs_device, address, value);
trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
@@ -11839,6 +12139,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return 0;
}
+ amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
+
value = cgs_read_register(ctx->cgs_device, address);
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 94fc4c15d2db..dfcbc1970fe6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,7 +50,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 5
+#define AMDGPU_DMUB_NOTIFICATION_MAX 6
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -180,6 +180,14 @@ struct amdgpu_dm_backlight_caps {
* @aux_support: Describes if the display supports AUX backlight.
*/
bool aux_support;
+ /**
+ * @ac_level: the default brightness if booted on AC
+ */
+ u8 ac_level;
+ /**
+ * @dc_level: the default brightness if booted on DC
+ */
+ u8 dc_level;
};
/**
@@ -834,6 +842,11 @@ struct dm_plane_state {
enum amdgpu_transfer_function blend_tf;
};
+enum amdgpu_dm_cursor_mode {
+ DM_CURSOR_NATIVE_MODE = 0,
+ DM_CURSOR_OVERLAY_MODE,
+};
+
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
@@ -864,6 +877,8 @@ struct dm_crtc_state {
* encoding.
*/
enum amdgpu_transfer_function regamma_tf;
+
+ enum amdgpu_dm_cursor_mode cursor_mode;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 83ea0afddda7..e16eecb146fd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -361,6 +361,7 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr
state->regamma_tf = cur->regamma_tf;
state->crc_skip_count = cur->crc_skip_count;
state->mpo_requested = cur->mpo_requested;
+ state->cursor_mode = cur->cursor_mode;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 27d5c6077630..717d97191dda 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1008,6 +1008,7 @@ static int replay_capability_show(struct seq_file *m, void *data)
seq_printf(m, "Sink support: %s\n", str_yes_no(sink_support_replay));
seq_printf(m, "Driver support: %s\n", str_yes_no(driver_support_replay));
+ seq_printf(m, "Config support: %s\n", str_yes_no(link->replay_settings.config.replay_supported));
return 0;
}
@@ -1419,7 +1420,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
uint8_t param_nums = 0;
bool ret = false;
- if (!aconnector || !aconnector->dc_link)
+ if (!aconnector->dc_link)
return -EINVAL;
if (size == 0)
@@ -3074,7 +3075,7 @@ static int psr_read_residency(void *data, u64 *val)
struct dc_link *link = connector->dc_link;
u32 residency = 0;
- link->dc->link_srv->edp_get_psr_residency(link, &residency);
+ link->dc->link_srv->edp_get_psr_residency(link, &residency, PSR_RESIDENCY_MODE_PHY);
*val = (u64)residency;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 2648d2b5be3e..adbf560d6a74 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -807,9 +807,6 @@ bool dm_helpers_dp_write_dsc_enable(
uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
uint8_t ret = 0;
- if (!stream)
- return false;
-
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
if (!aconnector->dsc_aux)
return false;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ac60f688660a..48118447c8d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -182,6 +182,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+ aconnector->dsc_aux = NULL;
+ port->passthrough_aux = NULL;
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -498,6 +500,8 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+ aconnector->dsc_aux = NULL;
+ port->passthrough_aux = NULL;
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -1238,14 +1242,6 @@ static bool is_dsc_need_re_compute(
if (!aconnector || !aconnector->dsc_aux)
continue;
- /*
- * check if cached virtual MST DSC caps are available and DSC is supported
- * as per specifications in their Virtual DPCD registers.
- */
- if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
- continue;
-
stream_on_link[new_stream_on_link_num] = aconnector;
new_stream_on_link_num++;
@@ -1601,111 +1597,171 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
}
#endif
+#if defined(CONFIG_DRM_AMD_DC_FP)
+static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_link_bw)
+{
+ uint32_t total_data_bw_efficiency_x10000 = 0;
+ uint32_t link_rate_per_lane_kbps = 0;
+ enum dc_link_rate link_rate;
+ union lane_count_set lane_count;
+ u8 dp_link_encoding;
+ u8 link_bw_set = 0;
+
+ *cur_link_bw = 0;
+
+ if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
+ drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
+ drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
+ return false;
+
+ switch (dp_link_encoding) {
+ case DP_8b_10b_ENCODING:
+ link_rate = link_bw_set;
+ link_rate_per_lane_kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
+ total_data_bw_efficiency_x10000 /= 100;
+ total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
+ break;
+ case DP_128b_132b_ENCODING:
+ switch (link_bw_set) {
+ case DP_LINK_BW_10:
+ link_rate = LINK_RATE_UHBR10;
+ break;
+ case DP_LINK_BW_13_5:
+ link_rate = LINK_RATE_UHBR13_5;
+ break;
+ case DP_LINK_BW_20:
+ link_rate = LINK_RATE_UHBR20;
+ break;
+ default:
+ return false;
+ }
+
+ link_rate_per_lane_kbps = link_rate * 10000;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
+ break;
+ default:
+ return false;
+ }
+
+ *cur_link_bw = link_rate_per_lane_kbps * lane_count.bits.LANE_COUNT_SET / 10000 * total_data_bw_efficiency_x10000;
+ return true;
+}
+#endif
+
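As a worked example of the 8b/10b branch: the constants below are assumed
approximations of dc's defines (27000 kHz link-rate reference, 10 transported
bits per byte, 80.00% 8b/10b efficiency, 97% FEC efficiency), not quoted from
the headers. HBR2 reports DP_LINK_BW_SET = 0x14 (20), so one lane carries
20 * 27000 * 10 = 5,400,000 kbps raw, and a 4-lane link delivers roughly
16.76 Gbps of payload after efficiency:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed dc-style constants for the 8b/10b case */
	#define REF_FREQ_KHZ		27000
	#define BITS_PER_DP_BYTE	10
	#define EFF_8b_10b_x10000	8000	/* 80.00% */
	#define EFF_FEC_x100		97	/* 97% */

	/* Effective payload bandwidth in kbps, mirroring the patch's order
	 * of operations (divide by 10000 before applying efficiency) */
	static uint32_t effective_bw_kbps(uint32_t link_bw_set, uint32_t lanes)
	{
		uint32_t per_lane = link_bw_set * REF_FREQ_KHZ * BITS_PER_DP_BYTE;
		uint32_t eff = EFF_8b_10b_x10000 / 100 * EFF_FEC_x100; /* 7760 */

		return (uint64_t)per_lane * lanes / 10000 * eff;
	}

	int main(void)
	{
		/* HBR2 (0x14), 4 lanes: prints 16761600 kbps (~16.76 Gbps) */
		printf("%u kbps\n", effective_bw_kbps(0x14, 4));
		return 0;
	}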
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
- int branch_max_throughput_mps = 0;
#if defined(CONFIG_DRM_AMD_DC_FP)
+ int branch_max_throughput_mps = 0;
struct dc_link_settings cur_link_settings;
- int pbn;
- unsigned int end_to_end_bw_in_kbps = 0;
- unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ uint32_t end_to_end_bw_in_kbps = 0;
+ uint32_t root_link_bw_in_kbps = 0;
+ uint32_t virtual_channel_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config_options dsc_options = {0};
+ uint32_t stream_kbps;
- /*
- * Consider the case with the depth of the mst topology tree is equal or less than 2
- * A. When dsc bitstream can be transmitted along the entire path
- * 1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND
- * 2. dsc passthrough supported at MST branch, or
- * 3. dsc decoding supported at leaf MST device
- * Use maximum dsc compression as bw constraint
- * B. When dsc bitstream cannot be transmitted along the entire path
- * Use native bw as bw constraint
+ /* DSC unnecessary case
+ * Check if timing could be supported within end-to-end BW
*/
- if (is_dsc_common_config_possible(stream, &bw_range) &&
- (aconnector->mst_output_port->passthrough_aux ||
- aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
- cur_link_settings = stream->link->verified_link_cap;
- upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
- down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
-
- /* pick the end to end bw bottleneck */
- end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps);
-
- if (end_to_end_bw_in_kbps < bw_range.min_kbps) {
- DRM_DEBUG_DRIVER("maximum dsc compression cannot fit into end-to-end bandwidth\n");
+ stream_kbps =
+ dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(stream->link));
+ cur_link_settings = stream->link->verified_link_cap;
+ root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
+ virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+
+ /* pick the end to end bw bottleneck */
+ end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+
+ if (stream_kbps <= end_to_end_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("No DSC needed. End-to-end bw sufficient.");
+ return DC_OK;
+ }
+
+ /* DSC necessary case */
+ if (!aconnector->dsc_aux)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (is_dsc_common_config_possible(stream, &bw_range)) {
+
+ /* capable of dsc passthrough; dsc bitstream along the entire path */
+ if (aconnector->mst_output_port->passthrough_aux) {
+ if (bw_range.min_kbps > end_to_end_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ }
+ } else {
+ /* dsc bitstream decoded at the last dp link */
+ struct drm_dp_mst_port *immediate_upstream_port = NULL;
+ uint32_t end_link_bw = 0;
+
+ /* Get last DP link BW capability */
+ if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
+ if (stream_kbps > end_link_bw) {
+ DRM_DEBUG_DRIVER("DSC decode at last link. Mode required bw can't fit into available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ }
- if (end_to_end_bw_in_kbps < bw_range.stream_kbps) {
- dc_dsc_get_default_config_option(stream->link->dc, &dsc_options);
- dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16;
- if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0],
- &stream->sink->dsc_caps.dsc_dec_caps,
- &dsc_options,
- end_to_end_bw_in_kbps,
- &stream->timing,
- dc_link_get_highest_encoding_format(stream->link),
- &stream->timing.dsc_cfg)) {
- stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc and dsc config found\n");
- } else {
- DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc but dsc config not found\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
+ /* Get virtual channel bandwidth between source and the link before the last link */
+ if (aconnector->mst_output_port->parent->port_parent)
+ immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
+
+ if (immediate_upstream_port) {
+ virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("DSC decode at last link. Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
}
}
- } else {
- /* Check if mode could be supported within max slot
- * number of current mst link and full_pbn of mst links.
- */
- int pbn_div, slot_num, max_slot_num;
- enum dc_link_encoding_format link_encoding;
- uint32_t stream_kbps =
- dc_bandwidth_in_kbps_from_timing(&stream->timing,
- dc_link_get_highest_encoding_format(stream->link));
-
- pbn = kbps_to_peak_pbn(stream_kbps);
- pbn_div = dm_mst_get_pbn_divider(stream->link);
- slot_num = DIV_ROUND_UP(pbn, pbn_div);
-
- link_encoding = dc_link_get_highest_encoding_format(stream->link);
- if (link_encoding == DC_LINK_ENCODING_DP_8b_10b)
- max_slot_num = 63;
- else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
- max_slot_num = 64;
- else {
- DRM_DEBUG_DRIVER("Invalid link encoding format\n");
+
+ /* Confirm we can obtain a dsc config */
+ dc_dsc_get_default_config_option(stream->link->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16;
+ if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0],
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &dsc_options,
+ end_to_end_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(stream->link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("Require dsc and dsc config found\n");
+ } else {
+ DRM_DEBUG_DRIVER("Require dsc but can't find appropriate dsc config\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
- if (slot_num > max_slot_num ||
- pbn > aconnector->mst_output_port->full_pbn) {
- DRM_DEBUG_DRIVER("Mode can not be supported within mst links!");
- return DC_FAIL_BANDWIDTH_VALIDATE;
+ /* Check mst dsc output bandwidth against branch_overall_throughput_*_mps */
+ switch (stream->timing.pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ branch_max_throughput_mps =
+ aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ case PIXEL_ENCODING_YCBCR420:
+ branch_max_throughput_mps =
+ aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
+ break;
+ default:
+ break;
}
- }
- /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
- switch (stream->timing.pixel_encoding) {
- case PIXEL_ENCODING_RGB:
- case PIXEL_ENCODING_YCBCR444:
- branch_max_throughput_mps =
- aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
- break;
- case PIXEL_ENCODING_YCBCR422:
- case PIXEL_ENCODING_YCBCR420:
- branch_max_throughput_mps =
- aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
- break;
- default:
- break;
- }
-#endif
- if (branch_max_throughput_mps != 0 &&
- ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000))
+ if (branch_max_throughput_mps != 0 &&
+ ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) {
+ DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails");
return DC_FAIL_BANDWIDTH_VALIDATE;
-
+ }
+ } else {
+ DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config.");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+#endif
return DC_OK;
}
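Condensed, the rewritten validation reads as the following ladder (an
editorial summary of the control flow above, not literal code):

	/*
	 * 1. stream fits min(root link bw, virtual channel bw)  -> DC_OK
	 * 2. DSC needed but no dsc_aux                           -> FAIL
	 * 3. no common DSC config possible                       -> FAIL
	 * 4. passthrough:    min DSC bw > end-to-end bw          -> FAIL
	 *    last-link dec.: stream bw > last link bw, or
	 *                    min DSC bw > upstream VC bw         -> FAIL
	 * 5. no computable DSC config for the timing             -> FAIL
	 * 6. pixel rate > branch DSC decoder throughput          -> FAIL
	 * otherwise                                              -> DC_OK
	 */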
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index a64f20fcddaa..bb534b2b0b71 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -104,8 +104,6 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state
*global_alpha = false;
*global_alpha_value = 0xff;
- if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
- return;
if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
@@ -1190,10 +1188,21 @@ static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane_state *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
return -EINVAL;
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ /* Reject overlay cursors for now */
+ if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+ return -EINVAL;
+
return 0;
}
@@ -1690,6 +1699,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
int res = -EPERM;
unsigned int supported_rotations;
uint64_t *modifiers = NULL;
+ unsigned int primary_zpos = dm->dc->caps.max_slave_planes;
num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
ARRAY_SIZE(formats));
@@ -1719,10 +1729,19 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
}
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- drm_plane_create_zpos_immutable_property(plane, 0);
+ /*
+ * Allow OVERLAY planes to be used as underlays by assigning an
+ * immutable zpos = # of OVERLAY planes to the PRIMARY plane.
+ */
+ drm_plane_create_zpos_immutable_property(plane, primary_zpos);
} else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
- unsigned int zpos = 1 + drm_plane_index(plane);
- drm_plane_create_zpos_property(plane, zpos, 1, 254);
+ /*
+ * OVERLAY planes can be below or above the PRIMARY, but cannot
+ * be above the CURSOR plane.
+ */
+ unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane);
+
+ drm_plane_create_zpos_property(plane, zpos, 0, 254);
} else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
drm_plane_create_zpos_immutable_property(plane, 255);
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 9c2f932217e4..80069651def3 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,7 +22,7 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp hubbub dccg hubp
+DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp hubbub dccg hubp dio dwb hpo mmhubbub mpc opp pg
ifdef CONFIG_DRM_AMD_DC_FP
@@ -36,10 +36,6 @@ DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dcn314
-DC_LIBS += dcn32
-DC_LIBS += dcn321
-DC_LIBS += dcn35
-DC_LIBS += dcn401
DC_LIBS += dml
DC_LIBS += dml2
endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 1726bdf89bae..506f82cd5cc6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -140,8 +140,6 @@ struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2)
res.value = arg1_int * arg2_int;
- ASSERT(res.value <= LONG_MAX);
-
res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
tmp = arg1_int * arg2_fra;
@@ -185,8 +183,6 @@ struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg)
res.value = arg_int * arg_int;
- ASSERT(res.value <= LONG_MAX);
-
res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
tmp = arg_int * arg_fra;
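For context on the multiply these hunks touch, a minimal unsigned sketch of
the 31.32 decomposition: split each operand into integer and fractional
halves and accumulate the four partial products (illustration only; dc's
version handles sign, rounding, and saturation separately):

	#include <stdint.h>

	static uint64_t fixmul_31_32(uint64_t a, uint64_t b)
	{
		uint64_t ai = a >> 32, af = a & 0xffffffffULL;
		uint64_t bi = b >> 32, bf = b & 0xffffffffULL;
		uint64_t res;

		res = (ai * bi) << 32;		/* integer * integer */
		res += ai * bf;			/* cross terms land on */
		res += af * bi;			/* the 32-bit boundary */
		res += (af * bf) >> 32;		/* frac * frac, truncated */
		return res;
	}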
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 25fe1a124029..3bacf470f7c5 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -665,6 +665,9 @@ static enum bp_result get_ss_info_v3_1(
ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
DATA_TABLES(ASIC_InternalSS_Info),
struct_size(ss_table_header_include, asSpreadSpectrum, 1)));
+ if (!ss_table_header_include)
+ return BP_RESULT_UNSUPPORTED;
+
table_size =
(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
- sizeof(ATOM_COMMON_TABLE_HEADER))
@@ -1034,6 +1037,8 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
&bp->base,
DATA_TABLES(ASIC_InternalSS_Info),
struct_size(header, asSpreadSpectrum, 1)));
+ if (!header)
+ return result;
memset(info, 0, sizeof(struct spread_spectrum_info));
@@ -1107,6 +1112,8 @@ static enum bp_result get_ss_info_from_ss_info_table(
get_atom_data_table_revision(header, &revision);
tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
+ if (!tbl)
+ return result;
if (1 != revision.major || 2 > revision.minor)
return result;
@@ -1634,6 +1641,8 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
DATA_TABLES(SS_Info));
+ if (!tbl)
+ return number;
if (1 != revision.major || 2 > revision.minor)
return number;
@@ -1716,6 +1725,8 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
&bp->base,
DATA_TABLES(ASIC_InternalSS_Info),
struct_size(header_include, asSpreadSpectrum, 1)));
+ if (!header_include)
+ return 0;
size = (le16_to_cpu(header_include->sHeader.usStructureSize)
- sizeof(ATOM_COMMON_TABLE_HEADER))
@@ -1755,6 +1766,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
DATA_TABLES(ASIC_InternalSS_Info),
struct_size(header_include, asSpreadSpectrum, 1)));
+ if (!header_include)
+ return number;
+
size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index aea4bb46856e..e18097f82091 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -320,16 +320,16 @@ static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an
regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ if (regs_and_bypass->dppclk_bypass > 4)
regs_and_bypass->dppclk_bypass = 0;
regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ if (regs_and_bypass->dcfclk_bypass > 4)
regs_and_bypass->dcfclk_bypass = 0;
regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ if (regs_and_bypass->dispclk_bypass > 4)
regs_and_bypass->dispclk_bypass = 0;
regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ if (regs_and_bypass->dprefclk_bypass > 4)
regs_and_bypass->dprefclk_bypass = 0;
if (log_info->enabled) {
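The dropped "< 0" halves of these conditions were dead code: the bypass
fields are unsigned, so the comparison can never be true, and compilers flag
it under -Wtype-limits. A small demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int bypass = 5;

		if (bypass < 0)	/* always false: bypass is unsigned */
			bypass = 0;
		printf("%u\n", bypass);	/* prints 5; the clamp is dead code */
		return 0;
	}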
@@ -772,7 +772,7 @@ void rn_clk_mgr_construct(
status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
if (status == PP_SMU_RESULT_OK &&
- ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ ctx->dc_bios->integrated_info) {
rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
/* treat memory config as single channel if memory is asymmetrics. */
if (ctx->dc->config.is_asymmetric_memory)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 191d8b969d19..9e2ef0e724fc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -252,16 +252,16 @@ static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an
regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ if (regs_and_bypass->dppclk_bypass > 4)
regs_and_bypass->dppclk_bypass = 0;
regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ if (regs_and_bypass->dcfclk_bypass > 4)
regs_and_bypass->dcfclk_bypass = 0;
regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ if (regs_and_bypass->dispclk_bypass > 4)
regs_and_bypass->dispclk_bypass = 0;
regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ if (regs_and_bypass->dprefclk_bypass > 4)
regs_and_bypass->dprefclk_bypass = 0;
if (log_info->enabled) {
@@ -731,7 +731,7 @@ void vg_clk_mgr_construct(
clk_mgr->base.base.bw_params = &vg_bw_params;
vg_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios->integrated_info) {
vg_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 12a7752758b8..e93df3d6222e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -785,7 +785,7 @@ void dcn31_clk_mgr_construct(
i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
}
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios->integrated_info) {
dcn31_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index a84f1e376dee..29eff386505a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -896,7 +896,7 @@ void dcn314_clk_mgr_construct(
i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
}
- if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
+ if (ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn314_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 5506cf9b3672..a0fb4481d2f1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -712,7 +712,7 @@ void dcn315_clk_mgr_construct(
i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
}
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios->integrated_info) {
dcn315_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 20ca7afa9cb4..c3e50c3aaa60 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -652,7 +652,7 @@ void dcn316_clk_mgr_construct(
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
dcn316_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios->integrated_info) {
dcn316_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index ff5fdc7b1198..06f0c41ad6f1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -36,7 +36,7 @@
#include "link.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
-#include "smu13_driver_if.h"
+#include "dcn32_smu13_driver_if.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
@@ -553,7 +553,7 @@ static void dcn32_auto_dpm_test_log(
//
// AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
////////////////////////////////////////////////////////////////////////////
- if (new_clocks && active_pipe_count > 0 &&
+ if (active_pipe_count > 0 &&
new_clocks->dramclk_khz > 0 &&
new_clocks->fclk_khz > 0 &&
new_clocks->dcfclk_khz > 0 &&
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index f2f60478b1a6..cf2d35363e8b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -28,7 +28,7 @@
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dalsmc.h"
-#include "smu13_driver_if.h"
+#include "dcn32_smu13_driver_if.h"
#define mmDAL_MSG_REG 0x1628A
#define mmDAL_ARG_REG 0x16273
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/smu13_driver_if.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/smu13_driver_if.h
deleted file mode 100644
index deeb85047e7b..000000000000
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/smu13_driver_if.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2021 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#ifndef SMU13_DRIVER_IF_DCN32_H
-#define SMU13_DRIVER_IF_DCN32_H
-
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x18
-
-//Only Clks that have DPM descriptors are listed here
-typedef enum {
- PPCLK_GFXCLK = 0,
- PPCLK_SOCCLK,
- PPCLK_UCLK,
- PPCLK_FCLK,
- PPCLK_DCLK_0,
- PPCLK_VCLK_0,
- PPCLK_DCLK_1,
- PPCLK_VCLK_1,
- PPCLK_DISPCLK,
- PPCLK_DPPCLK,
- PPCLK_DPREFCLK,
- PPCLK_DCFCLK,
- PPCLK_DTBCLK,
- PPCLK_COUNT,
-} PPCLK_e;
-
-typedef enum {
- UCLK_DIV_BY_1 = 0,
- UCLK_DIV_BY_2,
- UCLK_DIV_BY_4,
- UCLK_DIV_BY_8,
-} UCLK_DIV_e;
-
-typedef struct {
- uint8_t WmSetting;
- uint8_t Flags;
- uint8_t Padding[2];
-
-} WatermarkRowGeneric_t;
-
-#define NUM_WM_RANGES 4
-
-typedef enum {
- WATERMARKS_CLOCK_RANGE = 0,
- WATERMARKS_DUMMY_PSTATE,
- WATERMARKS_MALL,
- WATERMARKS_COUNT,
-} WATERMARKS_FLAGS_e;
-
-typedef struct {
- // Watermarks
- WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES];
-} Watermarks_t;
-
-typedef struct {
- Watermarks_t Watermarks;
- uint32_t Spare[16];
-
- uint32_t MmHubPadding[8]; // SMU internal use
-} WatermarksExternal_t;
-
-// These defines are used with the following messages:
-// SMC_MSG_TransferTableDram2Smu
-// SMC_MSG_TransferTableSmu2Dram
-
-// Table transfer status
-#define TABLE_TRANSFER_OK 0x0
-#define TABLE_TRANSFER_FAILED 0xFF
-#define TABLE_TRANSFER_PENDING 0xAB
-
-// Table types
-#define TABLE_PMFW_PPTABLE 0
-#define TABLE_COMBO_PPTABLE 1
-#define TABLE_WATERMARKS 2
-#define TABLE_AVFS_PSM_DEBUG 3
-#define TABLE_PMSTATUSLOG 4
-#define TABLE_SMU_METRICS 5
-#define TABLE_DRIVER_SMU_CONFIG 6
-#define TABLE_ACTIVITY_MONITOR_COEFF 7
-#define TABLE_OVERDRIVE 8
-#define TABLE_I2C_COMMANDS 9
-#define TABLE_DRIVER_INFO 10
-#define TABLE_COUNT 11
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 6c9b4e6491a5..5691714f738d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -218,6 +218,57 @@ static void dcn35_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
}
}
+static uint8_t get_lowest_dpia_index(const struct dc_link *link)
+{
+ const struct dc *dc_struct = link->dc;
+ uint8_t idx = 0xFF;
+ int i;
+
+ for (i = 0; i < MAX_PIPES * 2; ++i) {
+ if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ continue;
+
+ if (idx > dc_struct->links[i]->link_index)
+ idx = dc_struct->links[i]->link_index;
+ }
+
+ return idx;
+}
+
+static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 };
+ int i;
+
+ for (i = 0; i < context->stream_count; ++i) {
+ const struct dc_stream_state *stream = context->streams[i];
+ const struct dc_link *link = stream->link;
+ uint8_t lowest_dpia_index = 0, hr_index = 0;
+
+ if (!link)
+ continue;
+
+ lowest_dpia_index = get_lowest_dpia_index(link);
+ if (link->link_index < lowest_dpia_index)
+ continue;
+
+ hr_index = (link->link_index - lowest_dpia_index) / 2;
+ host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing(
+ &stream->timing, dc_link_get_highest_encoding_format(link));
+ }
+
+ for (i = 0; i < MAX_HOST_ROUTERS_NUM; ++i) {
+ new_clocks->host_router_bw_kbps[i] = host_router_bw_kbps[i];
+ if (should_set_clock(safe_to_lower, new_clocks->host_router_bw_kbps[i], clk_mgr_base->clks.host_router_bw_kbps[i])) {
+ clk_mgr_base->clks.host_router_bw_kbps[i] = new_clocks->host_router_bw_kbps[i];
+ dcn35_smu_notify_host_router_bw(clk_mgr, i, new_clocks->host_router_bw_kbps[i]);
+ }
+ }
+}
+
void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -342,6 +393,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
+ // notify PMFW of bandwidth per DPIA tunnel
+ if (dc->debug.notify_dpia_hr_bw)
+ dcn35_notify_host_router_bw(clk_mgr_base, context, safe_to_lower);
+
// notify DMCUB of latest clocks
memset(&cmd, 0, sizeof(cmd));
cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
@@ -1127,7 +1182,7 @@ void dcn35_clk_mgr_construct(
i, smu_dpm_clks.dpm_clks->MemPstateTable[i].Voltage);
}
- if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
+ if (ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn35_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
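Note on the dcn35 hunks above: dcn35_notify_host_router_bw() aggregates stream bandwidth per USB4 host router, assuming two DPIA links per router and contiguous DPIA link indices, and notifies PMFW only when should_set_clock() allows the transition. A minimal sketch of the index math (values hypothetical):

	/* lowest DPIA link_index found on this dc, e.g. 4 */
	uint8_t lowest_dpia_index = 4;
	/* a later DPIA link, e.g. link_index 7 */
	uint8_t link_index = 7;
	/* two DPIAs share one host router */
	uint8_t hr_index = (link_index - lowest_dpia_index) / 2;	/* -> router 1 */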
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index 1399b41dfd1c..f6f0e6a33001 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -89,7 +89,8 @@
#define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS
#define VBIOSSMC_MSG_DpControllerPhyStatus 0x1B ///< Inform PMFW about the pre conditions for turning SLDO2 on/off . bit[0]==1 precondition is met, bit[1-2] are for DPPHY number
#define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported
-#define VBIOSSMC_Message_Count 0x1D
+#define VBIOSSMC_MSG_NotifyHostRouterBW 0x1D
+#define VBIOSSMC_Message_Count 0x1E
#define VBIOSSMC_Status_BUSY 0x0
#define VBIOSSMC_Result_OK 0x1
@@ -98,6 +99,14 @@
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
#define VBIOSSMC_Result_CmdRejectedBusy 0xFC
+union dcn35_dpia_host_router_bw {
+ struct {
+ uint32_t hr_id : 16;
+ uint32_t bw_mbps : 16;
+ } bits;
+ uint32_t all;
+};
+
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
 * the register is NOT EQUAL to zero, and because the translation in msg_if.h
@@ -487,3 +496,13 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
//smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
return retv;
}
+
+void dcn35_smu_notify_host_router_bw(struct clk_mgr_internal *clk_mgr, uint32_t hr_id, uint32_t bw_kbps)
+{
+ union dcn35_dpia_host_router_bw msg_data = { 0 };
+
+ msg_data.bits.hr_id = hr_id;
+ msg_data.bits.bw_mbps = bw_kbps / 1000;
+
+ dcn35_smu_send_msg_with_param(clk_mgr, VBIOSSMC_MSG_NotifyHostRouterBW, msg_data.all);
+}
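Note: the new VBIOSSMC_MSG_NotifyHostRouterBW message carries both the router id and the bandwidth in a single 32-bit parameter; the bandwidth is converted from kbps to mbps so it fits the 16-bit field. A minimal sketch of the packing, using the union from the hunk above (values hypothetical):

	union dcn35_dpia_host_router_bw msg_data = { 0 };

	msg_data.bits.hr_id = 1;			/* host router 1 */
	msg_data.bits.bw_mbps = 24000000 / 1000;	/* 24 Gbps -> 24000 mbps */
	/* msg_data.all is the 32-bit GPINT parameter sent to PMFW */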
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
index 06cd3cc6d36e..3fae13c73934 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
@@ -198,4 +198,6 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr);
+void dcn35_smu_notify_host_router_bw(struct clk_mgr_internal *clk_mgr, uint32_t hr_id, uint32_t bw_kbps);
+
#endif /* DAL_DC_35_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 0975986f5989..70f06a7c882e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -791,14 +791,16 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
num_steps++;
}
- } else {
- /* P-State is not supported so force max clocks */
- idle_fclk_mhz =
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1].fclk_mhz;
- active_fclk_mhz = idle_fclk_mhz;
}
}
+ if (!clk_mgr_base->clks.fclk_p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+	/* when P-State switching is disabled, set FCLK min = max */
+ idle_fclk_mhz =
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1].fclk_mhz;
+ active_fclk_mhz = idle_fclk_mhz;
+ }
+
/* UPDATE DCFCLK */
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
@@ -872,19 +874,21 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
block_sequence[num_steps].func = CLK_MGR401_UPDATE_UCLK_PSTATE_SUPPORT;
num_steps++;
}
+ }
+ }
+
+ if (!clk_mgr_base->clks.p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK)) {
+	/* when P-State switching is disabled, set UCLK min = max */
+ if (dc->clk_mgr->dc_mode_softmax_enabled) {
+ /* will never have the functional UCLK min above the softmax
+ * since we calculate mode support based on softmax being the max UCLK
+ * frequency.
+ */
+ active_uclk_mhz = clk_mgr_base->bw_params->dc_mode_softmax_memclk;
} else {
- /* when disabling P-State switching, set UCLK min = max */
- if (dc->clk_mgr->dc_mode_softmax_enabled) {
- /* will never have the functional UCLK min above the softmax
- * since we calculate mode support based on softmax being the max UCLK
- * frequency.
- */
- active_uclk_mhz = clk_mgr_base->bw_params->dc_mode_softmax_memclk;
- } else {
- active_uclk_mhz = clk_mgr_base->bw_params->max_memclk_mhz;
- }
- idle_uclk_mhz = active_uclk_mhz;
+ active_uclk_mhz = clk_mgr_base->bw_params->max_memclk_mhz;
}
+ idle_uclk_mhz = active_uclk_mhz;
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -936,7 +940,7 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
}
/* CLK_MGR401_UPDATE_IDLE_HARDMINS */
- if ((update_idle_uclk || update_idle_uclk) && is_idle_dpm_enabled) {
+ if ((update_idle_uclk || update_idle_fclk) && is_idle_dpm_enabled) {
block_sequence[num_steps].params.update_idle_hardmin_params.uclk_mhz = idle_uclk_mhz;
block_sequence[num_steps].params.update_idle_hardmin_params.fclk_mhz = idle_fclk_mhz;
block_sequence[num_steps].func = CLK_MGR401_UPDATE_IDLE_HARDMINS;
@@ -1455,6 +1459,22 @@ static int dcn401_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base)
return dtb_ref_clk_khz;
}
+static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t dispclk_wdivider;
+ int disp_divider;
+
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
+ disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
+
+	/* Return DISPCLK freq in kHz */
+ if (disp_divider)
+ return (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
+
+ return 0;
+}
+
static struct clk_mgr_funcs dcn401_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz,
@@ -1468,6 +1488,7 @@ static struct clk_mgr_funcs dcn401_funcs = {
.are_clock_states_equal = dcn401_are_clock_states_equal,
.enable_pme_wa = dcn401_enable_pme_wa,
.is_smu_present = dcn401_is_smu_present,
+ .get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
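Note: dcn401_get_dispclk_from_dentist() reads back the live DISPCLK from the DENTIST divider rather than trusting a cached value. Worked example, assuming the usual DCN fixed-point convention where the decoded divider carries a x4 scale and DENTIST_DIVIDER_RANGE_SCALE_FACTOR is 4:

	/* dentist_vco_freq_khz = 3600000, WDIVIDER decodes to divider 12
	 * (i.e. a real divide-by-3):
	 *   dispclk_khz = 4 * 3600000 / 12 = 1200000 (1.2 GHz)
	 * a zero divider returns 0 instead of dividing by zero
	 */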
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0d97611c4817..a4ba6f99cd34 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1233,11 +1233,14 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
*/
if (is_phantom) {
if (tg->funcs->enable_crtc) {
- int main_pipe_width, main_pipe_height;
+ int main_pipe_width = 0, main_pipe_height = 0;
struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
- main_pipe_width = old_paired_stream->dst.width;
- main_pipe_height = old_paired_stream->dst.height;
+ if (old_paired_stream) {
+ main_pipe_width = old_paired_stream->dst.width;
+ main_pipe_height = old_paired_stream->dst.height;
+ }
+
if (dc->hwss.blank_phantom)
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
@@ -1628,6 +1631,9 @@ static void program_timing_sync(
for (k = 0; k < group_size; k++) {
struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
+ if (!status)
+ continue;
+
status->timing_sync_info.group_id = num_group;
status->timing_sync_info.group_size = group_size;
if (k == 0)
@@ -2225,6 +2231,9 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
if (dc_is_embedded_signal(params->streams[i]->signal)) {
struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
+ if (!status)
+ continue;
+
if (dc->hwss.is_abm_supported)
status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
else
@@ -2631,7 +2640,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->plane_info)
format = u->plane_info->format;
- else if (u->surface)
+ else
format = u->surface->format;
if (dce_use_lut(format))
@@ -2732,7 +2741,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->mst_bw_update)
su_flags->bits.mst_bw = 1;
- if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
+ if (stream_update->stream->freesync_on_desktop &&
(stream_update->vrr_infopacket || stream_update->allow_freesync ||
stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
su_flags->bits.fams_changed = 1;
@@ -4023,7 +4032,7 @@ static void commit_planes_for_stream(struct dc *dc,
stream_status =
stream_get_status(context, pipe_ctx->stream);
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface && stream_status)
dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
@@ -5332,6 +5341,16 @@ bool dc_set_replay_allow_active(struct dc *dc, bool active)
return true;
}
+/* set IPS disable state */
+bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips)
+{
+ dc_exit_ips_for_hw_access(dc);
+
+ dc->config.disable_ips = disable_ips;
+
+ return true;
+}
+
void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
if (dc->debug.disable_idle_power_optimizations)
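Note: the dc.c hunks share one pattern: dc_state_get_stream_status() and dc_state_get_paired_subvp_stream() can return NULL while a context is being rebuilt, so every caller now guards before dereferencing. A minimal sketch of the pattern, using names from the hunks above:

	struct dc_stream_status *status =
		dc_state_get_stream_status(ctx, pipe_set[k]->stream);

	if (!status)	/* stream not (yet) part of this context */
		continue;
	status->timing_sync_info.group_id = num_group;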
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 33318a112282..5037474bf95c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -505,7 +505,7 @@ void set_p_state_switch_method(
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
bool enable_subvp;
- if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba)
return;
if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 234236c43d21..eb053e1791c0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1917,9 +1917,15 @@ int resource_get_opp_heads_for_otg_master(const struct pipe_ctx *otg_master,
struct pipe_ctx *opp_heads[MAX_PIPES])
{
struct pipe_ctx *opp_head = &res_ctx->pipe_ctx[otg_master->pipe_idx];
+ struct dc *dc = otg_master->stream->ctx->dc;
int i = 0;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
if (!resource_is_pipe_type(otg_master, OTG_MASTER)) {
+		DC_LOG_WARNING("%s called from a non-OTG master, something "
+ "is wrong in the pipe configuration",
+ __func__);
ASSERT(0);
return 0;
}
@@ -2077,17 +2083,32 @@ int resource_get_odm_slice_index(const struct pipe_ctx *pipe_ctx)
int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
bool is_last_segment)
{
- const struct dc_crtc_timing *timing = &otg_master->stream->timing;
- int count = resource_get_odm_slice_count(otg_master);
- int h_active = timing->h_addressable +
- timing->h_border_left +
- timing->h_border_right;
- int width = h_active / count;
+ const struct dc_crtc_timing *timing;
+ int count;
+ int h_active;
+ int width;
bool two_pixel_alignment_required = false;
- if (otg_master && otg_master->stream_res.tg && otg_master->stream)
- two_pixel_alignment_required = otg_master->stream_res.tg->funcs->is_two_pixels_per_container(timing);
+ if (!otg_master || !otg_master->stream)
+ return 0;
+ timing = &otg_master->stream->timing;
+ count = resource_get_odm_slice_count(otg_master);
+ h_active = timing->h_addressable +
+ timing->h_border_left +
+ timing->h_border_right;
+ width = h_active / count;
+
+ if (otg_master->stream_res.tg)
+ two_pixel_alignment_required =
+ otg_master->stream_res.tg->funcs->is_two_pixels_per_container(timing) ||
+ /*
+ * 422 is sub-sampled horizontally. 1 set of chromas
+			 * (Cb/Cr) is shared by 2 lumas (i.e. 2 Y values).
+			 * Therefore, even though 422 is still 1 pixel per container,
+			 * the ODM segment width still needs to be 2-pixel aligned.
+ */
+ timing->pixel_encoding == PIXEL_ENCODING_YCBCR422;
if ((width % 2) && two_pixel_alignment_required)
width++;
@@ -2596,6 +2617,17 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
}
}
+static int get_num_of_free_pipes(const struct resource_pool *pool, const struct dc_state *context)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < pool->pipe_count; i++)
+ if (resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], FREE_PIPE))
+ count++;
+ return count;
+}
+
enum dc_status resource_add_otg_master_for_stream_output(struct dc_state *new_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream)
@@ -2729,37 +2761,33 @@ static bool acquire_secondary_dpp_pipes_and_add_plane(
struct dc_state *cur_ctx,
struct resource_pool *pool)
{
- struct pipe_ctx *opp_head_pipe, *sec_pipe, *tail_pipe;
+ struct pipe_ctx *sec_pipe, *tail_pipe;
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ int opp_head_count;
+ int i;
if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) {
ASSERT(0);
return false;
}
- opp_head_pipe = otg_master_pipe;
- while (opp_head_pipe) {
+ opp_head_count = resource_get_opp_heads_for_otg_master(otg_master_pipe,
+ &new_ctx->res_ctx, opp_heads);
+ if (get_num_of_free_pipes(pool, new_ctx) < opp_head_count)
+ /* not enough free pipes */
+ return false;
+
+ for (i = 0; i < opp_head_count; i++) {
sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe(
cur_ctx,
new_ctx,
pool,
- opp_head_pipe);
- if (!sec_pipe) {
- /* try tearing down MPCC combine */
- int pipe_idx = acquire_first_split_pipe(
- &new_ctx->res_ctx, pool,
- otg_master_pipe->stream);
-
- if (pipe_idx >= 0)
- sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx];
- }
-
- if (!sec_pipe)
- return false;
-
+ opp_heads[i]);
+ ASSERT(sec_pipe);
sec_pipe->plane_state = plane_state;
/* establish pipe relationship */
- tail_pipe = get_tail_pipe(opp_head_pipe);
+ tail_pipe = get_tail_pipe(opp_heads[i]);
tail_pipe->bottom_pipe = sec_pipe;
sec_pipe->top_pipe = tail_pipe;
sec_pipe->bottom_pipe = NULL;
@@ -2770,8 +2798,6 @@ static bool acquire_secondary_dpp_pipes_and_add_plane(
} else {
sec_pipe->prev_odm_pipe = NULL;
}
-
- opp_head_pipe = opp_head_pipe->next_odm_pipe;
}
return true;
}
@@ -2784,6 +2810,7 @@ bool resource_append_dpp_pipes_for_plane_composition(
struct dc_plane_state *plane_state)
{
bool success;
+
if (otg_master_pipe->plane_state == NULL)
success = add_plane_to_opp_head_pipes(otg_master_pipe,
plane_state, new_ctx);
@@ -2791,10 +2818,15 @@ bool resource_append_dpp_pipes_for_plane_composition(
success = acquire_secondary_dpp_pipes_and_add_plane(
otg_master_pipe, plane_state, new_ctx,
cur_ctx, pool);
- if (success)
+ if (success) {
/* when appending a plane mpc slice count changes from 0 to 1 */
success = update_pipe_params_after_mpc_slice_count_change(
plane_state, new_ctx, pool);
+ if (!success)
+ resource_remove_dpp_pipes_for_plane_composition(new_ctx,
+ pool, plane_state);
+ }
+
return success;
}
@@ -2804,6 +2836,7 @@ void resource_remove_dpp_pipes_for_plane_composition(
const struct dc_plane_state *plane_state)
{
int i;
+
for (i = pool->pipe_count - 1; i >= 0; i--) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -3100,9 +3133,14 @@ bool resource_update_pipes_for_stream_with_slice_count(
int i;
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
&new_ctx->res_ctx, stream);
- int cur_slice_count = resource_get_odm_slice_count(otg_master);
+ int cur_slice_count;
bool result = true;
+ if (!otg_master)
+ return false;
+
+ cur_slice_count = resource_get_odm_slice_count(otg_master);
+
if (new_slice_count == cur_slice_count)
return result;
@@ -4287,7 +4325,7 @@ static void set_avi_info_frame(
}
if (rid != 0 && fr_ind != 0) {
- hdmi_info.bits.header.version = 5;
+ hdmi_info.bits.header.version = 4;
hdmi_info.bits.header.length = 15;
hdmi_info.bits.FR0_FR3 = fr_ind & 0xF;
@@ -4749,6 +4787,9 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
{
+ if (dc == NULL || stream == NULL)
+ return DC_ERROR_UNEXPECTED;
+
struct dc_link *link = stream->link;
struct timing_generator *tg = dc->res_pool->timing_generators[0];
enum dc_status res = DC_OK;
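Note: resource_get_odm_slice_dst_width() now also forces 2-pixel alignment for YCbCr 4:2:2, where one Cb/Cr pair is shared by two Y samples even though each container still carries one pixel. Worked example (timing hypothetical):

	/* h_active = 2562, 2 ODM slices:
	 *   width = 2562 / 2 = 1281 (odd)
	 * 4:2:2 chroma is shared across pixel pairs, so a slice boundary
	 * must not split a pair: width++ -> 1282 for non-last segments
	 */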
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 8ea9391c60b7..e990346e51f6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -192,15 +192,14 @@ static void init_state(struct dc *dc, struct dc_state *state)
/* Public dc_state functions */
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
+ struct dc_state *state;
#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_configuration_options *dml2_opt;
+ struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
- dml2_opt = kmemdup(&dc->dml2_options, sizeof(*dml2_opt), GFP_KERNEL);
- if (!dml2_opt)
- return NULL;
+ memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
#endif
- struct dc_state *state = kvzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
+
+ state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL);
if (!state)
return NULL;
@@ -217,8 +216,6 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
dml2_opt->use_clock_dc_limits = true;
dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source);
}
-
- kfree(dml2_opt);
#endif
kref_init(&state->refcount);
@@ -445,6 +442,19 @@ enum dc_status dc_state_remove_stream(
return DC_OK;
}
+static void remove_mpc_combine_for_stream(const struct dc *dc,
+ struct dc_state *new_ctx,
+ const struct dc_state *cur_ctx,
+ struct dc_stream_status *status)
+{
+ int i;
+
+ for (i = 0; i < status->plane_count; i++)
+ resource_update_pipes_for_plane_with_slice_count(
+ new_ctx, cur_ctx, dc->res_pool,
+ status->plane_states[i], 1);
+}
+
bool dc_state_add_plane(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -455,8 +465,12 @@ bool dc_state_add_plane(
struct pipe_ctx *otg_master_pipe;
struct dc_stream_status *stream_status = NULL;
bool added = false;
+ int odm_slice_count;
+ int i;
stream_status = dc_state_get_stream_status(state, stream);
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
if (stream_status == NULL) {
dm_error("Existing stream not found; failed to attach surface!\n");
goto out;
@@ -464,22 +478,39 @@ bool dc_state_add_plane(
dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
plane_state, MAX_SURFACE_NUM);
goto out;
+ } else if (!otg_master_pipe) {
+ goto out;
}
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
- /* ODM combine could prevent us from supporting more planes
- * we will reset ODM slice count back to 1 when all planes have
- * been removed to maximize the amount of planes supported when
- * new planes are added.
- */
- resource_update_pipes_for_stream_with_slice_count(
- state, dc->current_state, dc->res_pool, stream, 1);
+ added = resource_append_dpp_pipes_for_plane_composition(state,
+ dc->current_state, pool, otg_master_pipe, plane_state);
- otg_master_pipe = resource_get_otg_master_for_stream(
- &state->res_ctx, stream);
- if (otg_master_pipe)
+ if (!added) {
+ /* try to remove MPC combine to free up pipes */
+ for (i = 0; i < state->stream_count; i++)
+ remove_mpc_combine_for_stream(dc, state,
+ dc->current_state,
+ &state->stream_status[i]);
added = resource_append_dpp_pipes_for_plane_composition(state,
- dc->current_state, pool, otg_master_pipe, plane_state);
+ dc->current_state, pool,
+ otg_master_pipe, plane_state);
+ }
+
+ if (!added) {
+ /* try to decrease ODM slice count gradually to free up pipes */
+ odm_slice_count = resource_get_odm_slice_count(otg_master_pipe);
+ for (i = odm_slice_count - 1; i > 0; i--) {
+ resource_update_pipes_for_stream_with_slice_count(state,
+ dc->current_state, dc->res_pool, stream,
+ i);
+ added = resource_append_dpp_pipes_for_plane_composition(
+ state,
+ dc->current_state, pool,
+ otg_master_pipe, plane_state);
+ if (added)
+ break;
+ }
+ }
if (added) {
stream_status->plane_states[stream_status->plane_count] =
@@ -539,15 +570,6 @@ bool dc_state_remove_plane(
stream_status->plane_states[stream_status->plane_count] = NULL;
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
- /* ODM combine could prevent us from supporting more planes
- * we will reset ODM slice count back to 1 when all planes have
- * been removed to maximize the amount of planes supported when
- * new planes are added.
- */
- resource_update_pipes_for_stream_with_slice_count(
- state, dc->current_state, dc->res_pool, stream, 1);
-
return true;
}
@@ -772,11 +794,16 @@ enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
/* setup subvp meta */
main_stream_status = dc_state_get_stream_status(state, main_stream);
+ if (main_stream_status) {
+ main_stream_status->mall_stream_config.type = SUBVP_MAIN;
+ main_stream_status->mall_stream_config.paired_stream = phantom_stream;
+ }
+
phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
- phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream_status->mall_stream_config.paired_stream = main_stream;
- main_stream_status->mall_stream_config.type = SUBVP_MAIN;
- main_stream_status->mall_stream_config.paired_stream = phantom_stream;
+ if (phantom_stream_status) {
+ phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
+ phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ }
return res;
}
@@ -785,14 +812,17 @@ enum dc_status dc_state_remove_phantom_stream(const struct dc *dc,
struct dc_state *state,
struct dc_stream_state *phantom_stream)
{
- struct dc_stream_status *main_stream_status;
+ struct dc_stream_status *main_stream_status = NULL;
struct dc_stream_status *phantom_stream_status;
/* reset subvp meta */
phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
- main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream);
- phantom_stream_status->mall_stream_config.type = SUBVP_NONE;
- phantom_stream_status->mall_stream_config.paired_stream = NULL;
+ if (phantom_stream_status) {
+ main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream);
+ phantom_stream_status->mall_stream_config.type = SUBVP_NONE;
+ phantom_stream_status->mall_stream_config.paired_stream = NULL;
+ }
+
if (main_stream_status) {
main_stream_status->mall_stream_config.type = SUBVP_NONE;
main_stream_status->mall_stream_config.paired_stream = NULL;
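Note: dc_state_add_plane() now acquires pipes in three passes instead of unconditionally resetting ODM: direct append, then tearing down MPC combine on all streams, then stepping the ODM slice count down one level at a time. A sketch of the ladder, with hypothetical helper names standing in for the calls in the hunk above:

	added = try_append(plane);			/* 1: use free pipes */
	if (!added) {
		remove_all_mpc_combine(state);		/* 2: un-split MPC */
		added = try_append(plane);
	}
	for (i = odm_slice_count - 1; !added && i > 0; i--) {
		set_odm_slice_count(stream, i);		/* 3: shrink ODM */
		added = try_append(plane);
	}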
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 55e1c19b97f1..9b24f448ce50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -1006,9 +1006,9 @@ static unsigned int dc_stream_get_max_flickerless_instant_vtotal_delta(struct dc
int safe_refresh_v_total = (int)div64_s64((long long)stream->timing.pix_clk_100hz*100, safe_refresh_hz*stream->timing.h_total);
if (increase)
- return ((stream->timing.v_total - safe_refresh_v_total) >= 0) ? (stream->timing.v_total - safe_refresh_v_total) : 0;
+ return (((int) stream->timing.v_total - safe_refresh_v_total) >= 0) ? (stream->timing.v_total - safe_refresh_v_total) : 0;
- return ((safe_refresh_v_total - stream->timing.v_total) >= 0) ? (safe_refresh_v_total - stream->timing.v_total) : 0;
+ return ((safe_refresh_v_total - (int) stream->timing.v_total) >= 0) ? (safe_refresh_v_total - stream->timing.v_total) : 0;
}
/*
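Note: v_total is unsigned, so without the (int) casts the subtraction wraps instead of going negative and the ">= 0" clamp can never trigger. Worked example:

	/* v_total = 1125 (unsigned), safe_refresh_v_total = 1150:
	 *   unsigned: 1125u - 1150 = 4294967271 -> ">= 0" true (wrong)
	 *   cast:     (int)1125 - 1150 = -25    -> clamped to 0 (right)
	 */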
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index a06015165a61..d0d1af451b64 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,13 +55,14 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.286"
+#define DC_VER "3.2.289"
#define MAX_SURFACES 3
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
+#define MAX_HOST_ROUTERS_NUM 2
/* Display Core Interfaces */
struct dc_versions {
@@ -291,6 +292,7 @@ struct dc_caps {
uint8_t subvp_drr_vblank_start_margin_us;
bool cursor_not_scaled;
bool dcmode_power_limits_present;
+ bool sequential_ono;
};
struct dc_bug_wa {
@@ -304,6 +306,7 @@ struct dc_bug_wa {
uint8_t dcfclk : 1;
uint8_t dcfclk_ds: 1;
} clock_update_disable_mask;
+ bool skip_psr_ips_crtc_disable;
//Customer Specific WAs
uint32_t force_backlight_start_level;
};
@@ -592,6 +595,7 @@ struct dc_clocks {
bool prev_p_state_change_support;
bool fclk_prev_p_state_change_support;
int num_ways;
+ int host_router_bw_kbps[MAX_HOST_ROUTERS_NUM];
/*
* @fw_based_mclk_switching
@@ -922,6 +926,7 @@ struct dc_debug_options {
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
+ bool ips_disallow_entry;
bool dmub_offload_enabled;
bool dmcub_emulation;
bool disable_idle_power_optimizations;
@@ -1042,7 +1047,7 @@ struct dc_debug_options {
unsigned int force_easf;
unsigned int force_sharpness;
unsigned int force_lls;
- bool edp_oled_no_backlight_enable;
+ bool notify_dpia_hr_bw;
};
@@ -1445,6 +1450,7 @@ struct dc {
} scratch;
struct dml2_configuration_options dml2_options;
+ struct dml2_configuration_options dml2_tmp;
enum dc_acpi_cm_power_state power_state;
};
@@ -2467,6 +2473,8 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable);
bool dc_set_replay_allow_active(struct dc *dc, bool active);
+bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips);
+
void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 0e70b95573ae..b2051c4dd7b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -444,7 +444,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- stream_status = NULL;
if (!pipe->stream)
continue;
@@ -464,7 +463,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- stream_status = NULL;
if (!resource_is_pipe_type(pipe, OTG_MASTER))
continue;
@@ -564,7 +562,7 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
{
struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
+ struct dc_crtc_timing *phantom_timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
uint16_t drr_frame_us = 0;
uint16_t min_drr_supported_us = 0;
@@ -578,6 +576,11 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
uint16_t min_vtotal_supported = 0;
uint16_t max_vtotal_supported = 0;
+ if (!phantom_stream)
+ return;
+
+ phantom_timing = &phantom_stream->timing;
+
pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
@@ -701,7 +704,13 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
+ if (!phantom_stream0)
+ return;
+
phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
+ if (!phantom_stream1)
+ return;
+
phantom_timing0 = &phantom_stream0->timing;
phantom_timing1 = &phantom_stream1->timing;
@@ -756,9 +765,14 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
+ struct dc_crtc_timing *phantom_timing;
uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
+ if (!phantom_stream)
+ return;
+
+ phantom_timing = &phantom_stream->timing;
+
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
@@ -1256,6 +1270,9 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
+ if (dc->work_arounds.skip_psr_ips_crtc_disable)
+ cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true;
+
if (allow_idle) {
volatile struct dmub_shared_state_ips_driver *ips_driver =
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
@@ -1505,6 +1522,8 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
+ allow_idle &= (!dc->debug.ips_disallow_entry);
+
if (dc_dmub_srv->idle_allowed == allow_idle)
return;
@@ -1653,6 +1672,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* send global configuration parameters */
global_cmd->config.global.max_allow_delay_us = 100 * 1000; //100ms
global_cmd->config.global.lock_wait_time_us = 5000; //5ms
+ global_cmd->config.global.recovery_timeout_us = 5000; //5ms
+ global_cmd->config.global.hwfq_flip_programming_delay_us = 100; //100us
/* copy static feature configuration */
global_cmd->config.global.features.all = dc->debug.fams2_config.all;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index bc760448a378..582606319764 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -70,6 +70,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
+
// Assign the function to calculate the number of partitions in the line buffer
// This is used to determine the vtap support
switch (plane_state->ctx->dce_version) {
@@ -112,7 +114,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
else
spl_in->basic_in.mpc_combine_v = resource_get_mpc_slice_index(pipe_ctx);
- spl_in->basic_out.odm_combine_factor = resource_get_odm_slice_count(pipe_ctx);
+ populate_splrect_from_rect(&spl_in->basic_out.odm_slice_rect, &odm_slice_src);
+ spl_in->basic_out.odm_combine_factor = 0;
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index cee012587e6e..21f4af9ab096 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1091,6 +1091,8 @@ struct replay_settings {
uint32_t coasting_vtotal;
/* Coasting vtotal table */
uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ /* Defer Update Coasting vtotal table */
+ uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
uint32_t link_off_frame_count;
/* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 3538e190f217..6a467c49b4a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -603,6 +603,10 @@ static void dccg401_set_dp_dto(
BREAK_TO_DEBUGGER();
return;
}
+ if (!params->refclk_hz) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
if (!dc_is_tmds_signal(params->signal)) {
uint64_t dto_integer;
@@ -881,7 +885,7 @@ static uint8_t dccg401_get_other_enabled_symclk_fe(struct dccg *dccg, uint32_t s
/* for DPMST, this backend could be used by multiple front end.
only disable the backend if this stream_enc_ins is the last active stream enc connected to this back_end*/
uint8_t i;
- for (i = 0; i != link_enc_inst && i < sizeof(fe_clk_en); i++) {
+ for (i = 0; i != link_enc_inst && i < ARRAY_SIZE(fe_clk_en); i++) {
if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
num_enabled_symclk_fe++;
}
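Note: sizeof() on an array yields bytes, not elements, so the old bound over-runs fe_clk_en for any element type wider than one byte; ARRAY_SIZE() is the kernel's element-count macro. A minimal sketch (array size hypothetical):

	uint32_t fe_clk_en[4];

	/* sizeof(fe_clk_en)     == 16 -> bytes, wrong loop bound */
	/* ARRAY_SIZE(fe_clk_en) ==  4 -> elements, correct bound */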
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 63deb5b60548..042a4187fff4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1088,11 +1088,15 @@ static bool dcn401_program_pix_clk(
dto_params.clk_src = DPREFCLK;
if (e) {
- dto_params.pixclk_hz = e->target_pixel_rate_khz * e->mult_factor;
- dto_params.refclk_hz = dtbclk_p_src_clk_khz * e->div_factor;
+ dto_params.pixclk_hz = e->target_pixel_rate_khz;
+ dto_params.pixclk_hz *= e->mult_factor;
+ dto_params.refclk_hz = dtbclk_p_src_clk_khz;
+ dto_params.refclk_hz *= e->div_factor;
} else {
- dto_params.pixclk_hz = pix_clk_params->requested_pix_clk_100hz * 100;
- dto_params.refclk_hz = dtbclk_p_src_clk_khz * 1000;
+ dto_params.pixclk_hz = pix_clk_params->requested_pix_clk_100hz;
+ dto_params.pixclk_hz *= 100;
+ dto_params.refclk_hz = dtbclk_p_src_clk_khz;
+ dto_params.refclk_hz *= 1000;
}
/* enable DP DTO */
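Note: assigning the 32-bit operand into the wider dto_params field first and multiplying in place promotes the arithmetic to the field's type; multiplying on the right-hand side wraps in 32 bits before the widening assignment (this assumes the dto_params fields are 64-bit). Worked example:

	uint64_t pixclk_hz;
	uint32_t pix_100hz = 48000000;	/* 4.8 GHz pixel clock, hypothetical */

	/* wrong: product wraps in 32 bits first, ~505 MHz lands in the u64 */
	pixclk_hz = pix_100hz * 100;
	/* right: widen first, then multiply in 64 bits -> 4800000000 */
	pixclk_hz = pix_100hz;
	pixclk_hz *= 100;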
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 4cdd4dacb761..f5e1d9caee4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -642,8 +642,7 @@ static void dce_mi_program_surface_config(
program_tiling(dce_mi, tiling_info);
program_size_and_rotation(dce_mi, rotation, plane_size);
- if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN &&
- format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
program_grph_pixel_format(dce_mi, format);
}
@@ -663,8 +662,7 @@ static void dce60_mi_program_surface_config(
program_tiling(dce_mi, tiling_info);
dce60_program_size(dce_mi, rotation, plane_size);
- if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN &&
- format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
program_grph_pixel_format(dce_mi, format);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index f4987e96fbf9..0d7e7f3b81a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -258,7 +258,7 @@ bool dmub_abm_set_pipe(struct abm *abm,
{
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
- uint32_t ramping_boundary = 0xFFFF;
+ uint8_t ramping_boundary = 0xFF;
memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 3e243e407bb8..ccf153b7a467 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -445,10 +445,13 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
/*
* Get PSR residency from firmware.
*/
-static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst)
+static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency,
+ uint8_t panel_inst, enum psr_residency_mode mode)
{
uint16_t param = (uint16_t)(panel_inst << 8);
+ param |= mode;
+
/* Send gpint command and wait for ack */
dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__PSR_RESIDENCY, param, residency,
DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
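Note: the PSR residency GPINT keeps the panel instance in the high byte and now carries the requested residency mode in the low byte of the same 16-bit parameter. Worked example (values hypothetical):

	/* panel_inst = 1, mode = 2:
	 *   param = (1 << 8) | 2 = 0x0102
	 * bits 15:8 select the panel, bits 7:0 select the mode
	 */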
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 289e42070ece..a6e282d950c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -27,6 +27,7 @@
#define _DMUB_PSR_H_
#include "dc_types.h"
+#include "dmub_cmd.h"
struct dc_link;
struct dmub_psr_funcs;
@@ -46,7 +47,7 @@ struct dmub_psr_funcs {
uint8_t panel_inst);
void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);
void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,
- uint8_t panel_inst);
+ uint8_t panel_inst, enum psr_residency_mode mode);
void (*psr_set_sink_vtotal_in_psr_active)(struct dmub_psr *dmub,
uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 2873ac8f16fb..5437ebd8bc21 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -1,27 +1,6 @@
-/*
- * Copyright 2023 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc.h"
#include "dc_dmub_srv.h"
@@ -33,26 +12,23 @@
#define MAX_PIPES 6
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
+
/*
* Get Replay state from firmware.
*/
static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst)
{
- struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
- /* uint32_t raw_state = 0; */
uint32_t retry_count = 0;
- enum dmub_status status;
do {
// Send gpint command and wait for ack
- status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_REPLAY_STATE, panel_inst, 30);
-
- if (status == DMUB_STATUS_OK) {
- // GPINT was executed, get response
- dmub_srv_get_gpint_response(srv, (uint32_t *)state);
- } else
+ if (!dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__GET_REPLAY_STATE, panel_inst,
+ (uint32_t *)state, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
// Return invalid state when GPINT times out
*state = REPLAY_STATE_INVALID;
+ }
} while (++retry_count <= 1000 && *state == REPLAY_STATE_INVALID);
// Assert if max retry hit
@@ -84,7 +60,7 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait,
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Below loops 1000 x 500us = 500 ms.
* Exit REPLAY may need to wait 1-2 frames to power up. Timeout after at
@@ -102,7 +78,8 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait,
break;
}
- fsleep(500);
+ /* must *not* be fsleep - this can be called from high irq levels */
+ udelay(500);
}
/* assert if max retry hit */
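Note: fsleep() may sleep, and this poll loop can run at high IRQ level where sleeping is forbidden, so it busy-waits with udelay() instead. Rule-of-thumb sketch (the context flag is hypothetical, for illustration only):

	if (may_be_atomic_context)
		udelay(500);			/* busy-wait, never sleeps  */
	else
		usleep_range(500, 1000);	/* scheduler-friendly sleep */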
@@ -126,7 +103,7 @@ static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int pow
cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt;
cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -199,11 +176,11 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
(link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
!link->panel_config.dsc.disable_dsc_edp &&
link->dc->caps.edp_dsc_support)) &&
- link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 /*&&
+ link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
(!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
sizeof(DP_SINK_DEVICE_STR_ID_1)) ||
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
- sizeof(DP_SINK_DEVICE_STR_ID_2)))*/)
+ sizeof(DP_SINK_DEVICE_STR_ID_2))))
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 1;
else
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
@@ -218,42 +195,62 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
* Set coasting vtotal.
*/
static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
- uint16_t coasting_vtotal,
+ uint32_t coasting_vtotal,
uint8_t panel_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
+ struct dmub_rb_cmd_replay_set_coasting_vtotal *pCmd = NULL;
+
+ pCmd = &(cmd.replay_set_coasting_vtotal);
memset(&cmd, 0, sizeof(cmd));
- cmd.replay_set_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
- cmd.replay_set_coasting_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
- cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
- cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
+ pCmd->header.type = DMUB_CMD__REPLAY;
+ pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
+ pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
+ pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
+ pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
* Get Replay residency from firmware.
*/
static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
- uint32_t *residency, const bool is_start, const bool is_alpm)
+ uint32_t *residency, const bool is_start, enum pr_residency_mode mode)
{
- struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
uint16_t param = (uint16_t)(panel_inst << 8);
- if (is_alpm)
+ switch (mode) {
+ case PR_RESIDENCY_MODE_PHY:
+ param |= REPLAY_RESIDENCY_FIELD_MODE_PHY;
+ break;
+ case PR_RESIDENCY_MODE_ALPM:
param |= REPLAY_RESIDENCY_FIELD_MODE_ALPM;
+ break;
+ case PR_RESIDENCY_MODE_IPS2:
+ param |= REPLAY_RESIDENCY_REVISION_1;
+ param |= REPLAY_RESIDENCY_FIELD_MODE2_IPS;
+ break;
+ case PR_RESIDENCY_MODE_FRAME_CNT:
+ param |= REPLAY_RESIDENCY_REVISION_1;
+ param |= REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT;
+ break;
+ case PR_RESIDENCY_MODE_ENABLEMENT_PERIOD:
+ param |= REPLAY_RESIDENCY_REVISION_1;
+ param |= REPLAY_RESIDENCY_FIELD_MODE2_EN_PERIOD;
+ break;
+ default:
+ break;
+ }
if (is_start)
param |= REPLAY_RESIDENCY_ENABLE;
// Send gpint command and wait for ack
- dmub_srv_send_gpint_command(srv, DMUB_GPINT__REPLAY_RESIDENCY, param, 30);
-
- if (!is_start)
- dmub_srv_get_gpint_response(srv, residency);
- else
+ if (!dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param,
+ residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
*residency = 0;
}
@@ -261,20 +258,22 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
* Set REPLAY power optimization flags and coasting vtotal.
*/
static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal)
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
+ struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal *pCmd = NULL;
+
+ pCmd = &(cmd.replay_set_power_opt_and_coasting_vtotal);
memset(&cmd, 0, sizeof(cmd));
- cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
- cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type =
- DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
- cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
- cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt;
- cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst;
- cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
+ pCmd->header.type = DMUB_CMD__REPLAY;
+ pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
+ pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ pCmd->replay_set_power_opt_data.power_opt = power_opt;
+ pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
+ pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
+ pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -330,6 +329,30 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_frameupdate_timer.data.frameupdate_count =
cmd_element->timer_data.frameupdate_count;
break;
+ case Replay_Set_Pseudo_VTotal:
+ //Header
+ cmd.replay_set_pseudo_vtotal.header.sub_type =
+ DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
+ cmd.replay_set_pseudo_vtotal.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ //Cmd Body
+ cmd.replay_set_pseudo_vtotal.data.panel_inst =
+ cmd_element->pseudo_vtotal_data.panel_inst;
+ cmd.replay_set_pseudo_vtotal.data.vtotal =
+ cmd_element->pseudo_vtotal_data.vtotal;
+ break;
+ case Replay_Disabled_Adaptive_Sync_SDP:
+ //Header
+ cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
+ DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
+ cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ //Cmd Body
+ cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst =
+ cmd_element->disabled_adaptive_sync_sdp_data.panel_inst;
+ cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled =
+ cmd_element->disabled_adaptive_sync_sdp_data.force_disabled;
+ break;
case Replay_Msg_Not_Support:
default:
return;
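Note: coasting_vtotal widens from 16 to 32 bits across the replay interface, but the DMUB command layout keeps two 16-bit fields, so the value is split into a low half (coasting_vtotal) and a high half (coasting_vtotal_high). A minimal sketch (value hypothetical):

	uint32_t coasting_vtotal = 0x0001F40A;

	uint16_t low  = coasting_vtotal & 0xFFFF;		/* 0xF40A */
	uint16_t high = (coasting_vtotal & 0xFFFF0000) >> 16;	/* 0x0001 */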
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index 3613aff994d7..e6346c0ffc0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -1,27 +1,6 @@
-/*
- * Copyright 2023 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
#ifndef _DMUB_REPLAY_H_
#define _DMUB_REPLAY_H_
@@ -47,12 +26,12 @@ struct dmub_replay_funcs {
uint8_t panel_inst);
void (*replay_send_cmd)(struct dmub_replay *dmub,
enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element);
- void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal,
+ void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint32_t coasting_vtotal,
uint8_t panel_inst);
void (*replay_residency)(struct dmub_replay *dmub,
- uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm);
+ uint8_t panel_inst, uint32_t *residency, const bool is_start, const enum pr_residency_mode mode);
void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal);
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal);
};
struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
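Widening coasting_vtotal from uint16_t to uint32_t in these callbacks matters once coasting vertical totals can exceed 65535, where a u16 parameter would wrap silently. A small sketch of the truncation (the sample value is illustrative):

#include <stdint.h>
#include <stdio.h>

static void set_vtotal_u16(uint16_t v) { printf("u16 sees %u\n", (unsigned)v); }
static void set_vtotal_u32(uint32_t v) { printf("u32 sees %u\n", (unsigned)v); }

int main(void)
{
        uint32_t coasting_vtotal = 70000;          /* made-up large vtotal */

        set_vtotal_u16((uint16_t)coasting_vtotal); /* wraps to 4464 */
        set_vtotal_u32(coasting_vtotal);           /* value preserved */
        return 0;
}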
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index 994fb732a7cb..a0d437f0ce2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter(
int pair;
uint16_t odd_coef, even_coef;
+ if (!filter)
+ return;
+
for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
for (pair = 0; pair < tap_pairs; pair++) {
even_coef = filter[phase * taps + 2 * pair];
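The added `if (!filter) return;` is a plain early-out so the coefficient loop never indexes a NULL table. A sketch of the shape:

#include <stddef.h>
#include <stdint.h>

static uint32_t sum_coefs(const uint16_t *filter, int n)
{
        uint32_t acc = 0;

        if (!filter)            /* bail out before the first dereference */
                return 0;
        for (int i = 0; i < n; i++)
                acc += filter[i];
        return acc;
}

int main(void)
{
        return (int)sum_coefs(NULL, 8);   /* safely returns 0 */
}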
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
deleted file mode 100644
index 2d0eb203ab69..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved
-#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
-#
-# Authors: AMD
-#
-# Makefile for dcn32.
-
-DCN32 = dcn32_mmhubbub.o dcn32_mpc.o \
- dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
- dcn32_hpo_dp_link_encoder.o
-
-AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN32)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile
deleted file mode 100644
index c195c47f58b4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
-#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
-#
-# Authors: AMD
-#
-# Makefile for dcn321.
-
-DCN321 = dcn321_dio_link_encoder.o
-
-AMD_DAL_DCN321 = $(addprefix $(AMDDALPATH)/dc/dcn321/,$(DCN321))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN321)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
deleted file mode 100644
index d0fab60e7cd9..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved
-#
-# All rights reserved. This notice is intended as a precaution against
-# inadvertent publication and does not imply publication or any waiver
-# of confidentiality. The year included in the foregoing notice is the
-# year of creation of the work.
-#
-# Authors: AMD
-#
-# Makefile for DCN35.
-
-DCN35 = dcn35_dio_stream_encoder.o \
- dcn35_dio_link_encoder.o \
- dcn35_mmhubbub.o dcn35_opp.o dcn35_pg_cntl.o dcn35_dwb.o
-
-AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN35)
diff --git a/drivers/gpu/drm/amd/display/dc/dio/Makefile b/drivers/gpu/drm/amd/display/dc/dio/Makefile
new file mode 100644
index 000000000000..5fa905c2fe55
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dio/Makefile
@@ -0,0 +1,63 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN32
+###############################################################################
+DIO_DCN32 = dcn32_dio_link_encoder.o dcn32_dio_stream_encoder.o
+
+AMD_DAL_DIO_DCN32 = $(addprefix $(AMDDALPATH)/dc/dio/dcn32/,$(DIO_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN32)
+
+###############################################################################
+# DCN35
+###############################################################################
+DIO_DCN35 = dcn35_dio_link_encoder.o dcn35_dio_stream_encoder.o
+
+AMD_DAL_DIO_DCN35 = $(addprefix $(AMDDALPATH)/dc/dio/dcn35/,$(DIO_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN35)
+
+###############################################################################
+# DCN321
+###############################################################################
+DIO_DCN321 = dcn321_dio_link_encoder.o
+
+AMD_DAL_DIO_DCN321 = $(addprefix $(AMDDALPATH)/dc/dio/dcn321/,$(DIO_DCN321))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN321)
+
+
+###############################################################################
+# DCN401
+###############################################################################
+DIO_DCN401 = dcn401_dio_link_encoder.o dcn401_dio_stream_encoder.o
+
+AMD_DAL_DIO_DCN401 = $(addprefix $(AMDDALPATH)/dc/dio/dcn401/,$(DIO_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN401)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.c
index 06907e8a4eda..06907e8a4eda 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.h
index 35d23d9db45e..35d23d9db45e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
index 1a9bb614c41e..1a9bb614c41e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.h
index ca53d39561d2..ca53d39561d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c
index 05783daa62ac..05783daa62ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.h
index 2205f39b0a24..2205f39b0a24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
index 20f810a6646c..d4a3e811aa39 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
@@ -183,13 +183,13 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
-
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+
enc10->base.transmitter = init_data->transmitter;
/* set the flag to indicate whether the driver polls the I2C data pin
@@ -255,10 +255,6 @@ void dcn35_link_encoder_construct(
enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
- if (bp_cap_info.DP_IS_USB_C) {
- /*BIOS not switch to use CONNECTOR_ID_USBC = 24 yet*/
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
- }
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
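The reordering above matters because `enc10->base.features = *enc_features` is a whole-struct copy: any flag bit raised before it is discarded, which is why DP_IS_USB_C now gets set only after the copy. A standalone sketch of the pitfall (illustrative types, not DC code):

#include <stdio.h>

struct features {
        unsigned dp_is_usb_c : 1;
        unsigned uhbr10 : 1;
};

int main(void)
{
        struct features defaults = { .uhbr10 = 1 };  /* stands in for *enc_features */
        struct features f = { 0 };

        f.dp_is_usb_c = 1;       /* set before the copy ... */
        f = defaults;            /* ... and silently clobbered here */
        printf("set before copy: %u\n", (unsigned)f.dp_is_usb_c);  /* 0 */

        f = defaults;
        f.dp_is_usb_c = 1;       /* the fixed ordering */
        printf("set after copy:  %u\n", (unsigned)f.dp_is_usb_c);  /* 1 */
        return 0;
}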
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
index d546a3676304..d546a3676304 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index 1325db3a4ed0..6a179e5ab417 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -392,7 +392,7 @@ static void enc35_reset_fifo(struct stream_encoder *enc, bool reset)
udelay(10);
}
-static void enc35_disable_fifo(struct stream_encoder *enc)
+void enc35_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -401,7 +401,7 @@ static void enc35_disable_fifo(struct stream_encoder *enc)
REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
}
-static void enc35_enable_fifo(struct stream_encoder *enc)
+void enc35_enable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.h
index 1212fcee38f2..ddb33fdfb4ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.h
@@ -322,5 +322,11 @@ void enc3_dp_set_dsc_pps_info_packet(
uint8_t *dsc_packed_pps,
bool immediate_update);
+void enc35_disable_fifo(
+ struct stream_encoder *enc);
+
+void enc35_enable_fifo(
+ struct stream_encoder *enc);
+
#endif /* __DC_DIO_STREAM_ENCODER_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.c
index 7e558ca195ef..7e558ca195ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.h
index 6baab8302b81..6baab8302b81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index 2ebfca4769aa..0a27e0942a12 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -27,6 +27,7 @@
#include "dc_bios_types.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn32/dcn32_dio_stream_encoder.h"
+#include "dcn35/dcn35_dio_stream_encoder.h"
#include "dcn401_dio_stream_encoder.h"
#include "reg_helper.h"
@@ -764,7 +765,8 @@ static const struct stream_encoder_funcs dcn401_str_enc_funcs = {
.enable_stream = enc401_stream_encoder_enable,
.set_input_mode = enc401_set_dig_input_mode,
- .enable_fifo = enc32_enable_fifo,
+ .enable_fifo = enc35_enable_fifo,
+ .disable_fifo = enc35_disable_fifo,
.map_stream_to_link = enc401_stream_encoder_map_to_link,
};
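With the `static` qualifiers dropped and prototypes exported from the dcn35 header, dcn401 points its function table at the dcn35 FIFO handlers instead of carrying duplicates. A minimal sketch of this ops-table reuse (names are stand-ins, not the DC API):

#include <stdio.h>

struct stream_encoder;                       /* opaque here */

struct stream_encoder_ops {
        void (*enable_fifo)(struct stream_encoder *enc);
        void (*disable_fifo)(struct stream_encoder *enc);
};

/* "older IP" implementations, no longer static so others can link them */
void gen35_enable_fifo(struct stream_encoder *enc)  { (void)enc; puts("fifo on");  }
void gen35_disable_fifo(struct stream_encoder *enc) { (void)enc; puts("fifo off"); }

/* the newer block simply reuses them */
static const struct stream_encoder_ops gen401_ops = {
        .enable_fifo  = gen35_enable_fifo,
        .disable_fifo = gen35_disable_fifo,
};

int main(void)
{
        gen401_ops.enable_fifo(NULL);
        gen401_ops.disable_fifo(NULL);
        return 0;
}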
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
index d751839598f8..d751839598f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index f1cde1e4265f..39525721c976 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -690,7 +690,7 @@ static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
unsigned int pixel_rate_100hz)
{
- float pixel_rate_mhz = pixel_rate_100hz / 10000;
+ float pixel_rate_mhz = pixel_rate_100hz / 10000.0;
/*
* force enabling pipe split by lower dpp clock for DPM0 to just
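The `/ 10000` → `/ 10000.0` change here, like the many `7 / 8` → `7.0 / 8` and `1 / 16` → `1.0 / 16` fixes below, works around C integer division: with two integral operands the quotient truncates before any float conversion happens, so `7 / 8` evaluates to 0 rather than 0.875. A compilable illustration with a made-up rate:

#include <stdio.h>

int main(void)
{
        unsigned int pixel_rate_100hz = 5945000;       /* 594.5 MHz in 100 Hz units */

        float as_int_div = pixel_rate_100hz / 10000;   /* truncates to 594.0 */
        float as_fp_div  = pixel_rate_100hz / 10000.0; /* keeps 594.5 */

        printf("int div: %f   fp div: %f\n", as_int_div, as_fp_div);
        return 0;
}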
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 74da9ebda016..54dd7e164635 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1882,10 +1882,10 @@ void dcn20_update_bounding_box(struct dc *dc,
bb->clock_limits[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
min_dcfclk : min_fclk_required_by_uclk;
- bb->clock_limits[i].socclk_mhz = (bb->clock_limits[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
+ bb->clock_limits[i].socclk_mhz = (bb->clock_limits[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000.0) ?
max_clocks->socClockInKhz / 1000 : bb->clock_limits[i].fabricclk_mhz;
- bb->clock_limits[i].dcfclk_mhz = (bb->clock_limits[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
+ bb->clock_limits[i].dcfclk_mhz = (bb->clock_limits[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000.0) ?
max_clocks->dcfClockInKhz / 1000 : bb->clock_limits[i].fabricclk_mhz;
bb->clock_limits[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
@@ -1917,35 +1917,35 @@ void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
// First pass - cap all clocks higher than the reported max
for (i = 0; i < bb->num_states; i++) {
- if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
+ if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000.0))
&& max_clocks.dcfClockInKhz != 0)
bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);
- if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
+ if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000.0) * 16)
&& max_clocks.uClockInKhz != 0)
bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
- if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
+ if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000.0))
&& max_clocks.fabricClockInKhz != 0)
bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
- if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
+ if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000.0))
&& max_clocks.displayClockInKhz != 0)
bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);
- if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
+ if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000.0))
&& max_clocks.dppClockInKhz != 0)
bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);
- if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
+ if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000.0))
&& max_clocks.phyClockInKhz != 0)
bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);
- if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
+ if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000.0))
&& max_clocks.socClockInKhz != 0)
bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);
- if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
+ if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000.0))
&& max_clocks.dscClockInKhz != 0)
bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
}
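Note that only the comparisons move to floating point here; the assigned caps stay integer MHz as before. Comparing against the truncated integer quotient could cap a clock that is actually under the limit. A small sketch with assumed values:

#include <stdio.h>

int main(void)
{
        double clock_mhz = 999.2;        /* bounding-box entry under test */
        unsigned int max_khz = 999500;   /* reported limit: 999.5 MHz */

        /* an integer compare against 999 would wrongly cap 999.2 */
        if (clock_mhz > max_khz / 1000.0)
                clock_mhz = max_khz / 1000;   /* cap stored in whole MHz */

        printf("clock stays %.1f MHz\n", clock_mhz);
        return 0;
}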
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 7bf4bb7ad044..565f3c492477 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -1017,7 +1017,7 @@ static unsigned int CalculateVMAndRowBytes(
if (ScanDirection == dm_horz)
FractionOfPTEReturnDrop = 0;
else
- FractionOfPTEReturnDrop = 7 / 8;
+ FractionOfPTEReturnDrop = 7.0 / 8;
} else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeight = 16 * BlockHeight256Bytes;
PixelPTEReqWidth = 16 * BlockWidth256Bytes;
@@ -3231,22 +3231,22 @@ static unsigned int TruncToValidBPP(
if (Format == dm_420) {
if (DecimalBPP < 6)
return BPP_INVALID;
- else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16)
- return 1.5 * DSCInputBitPerComponent - 1 / 16;
+ else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16)
+ return 1.5 * DSCInputBitPerComponent - 1.0 / 16;
else
return dml_floor(16 * DecimalBPP, 1) / 16;
} else if (Format == dm_n422) {
if (DecimalBPP < 7)
return BPP_INVALID;
- else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16)
- return 2 * DSCInputBitPerComponent - 1 / 16;
+ else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16)
+ return 2 * DSCInputBitPerComponent - 1.0 / 16;
else
return dml_floor(16 * DecimalBPP, 1) / 16;
} else {
if (DecimalBPP < 8)
return BPP_INVALID;
- else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16)
- return 3 * DSCInputBitPerComponent - 1 / 16;
+ else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16)
+ return 3 * DSCInputBitPerComponent - 1.0 / 16;
else
return dml_floor(16 * DecimalBPP, 1) / 16;
}
@@ -4322,7 +4322,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->RoundedUpMaxSwathSizeBytesC = 0;
}
- if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024 / 2) {
+ if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024.0 / 2) {
locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k];
locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k];
} else {
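The `- 1.0 / 16` term reflects DSC carrying bits-per-pixel in 1/16-bpp steps, so the real ceiling sits one step below 1.5/2/3 x bpc; as integer math `1 / 16` evaluated to 0 and the ceiling came out one step too high. For 10 bpc at 4:2:0 that is 1.5 * 10 - 1/16 = 14.9375, not 15. A sketch:

#include <stdio.h>

int main(void)
{
        double bpc = 10.0;                    /* DSC input bits per component */
        double wrong = 1.5 * bpc - 1 / 16;    /* 15.0000: 1/16 truncated to 0 */
        double right = 1.5 * bpc - 1.0 / 16;  /* 14.9375 */

        printf("wrong=%.4f right=%.4f\n", wrong, right);
        return 0;
}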
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 989d83ee3842..9d6675ecc5f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -1077,7 +1077,7 @@ static unsigned int CalculateVMAndRowBytes(
if (ScanDirection == dm_horz)
FractionOfPTEReturnDrop = 0;
else
- FractionOfPTEReturnDrop = 7 / 8;
+ FractionOfPTEReturnDrop = 7.0 / 8;
} else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeight = 16 * BlockHeight256Bytes;
PixelPTEReqWidth = 16 * BlockWidth256Bytes;
@@ -4443,7 +4443,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->RoundedUpMaxSwathSizeBytesC = 0;
}
- if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024 / 2) {
+ if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024.0 / 2) {
locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k];
locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 548cdef8a8ad..07146569e335 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -1156,11 +1156,6 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
- hscale_pixel_rate_l = 0.;
- hscale_pixel_rate_c = 0.;
- min_hratio_fact_l = 1.0;
- min_hratio_fact_c = 1.0;
-
if (htaps_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
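These deletions, repeated across the DML variants below, drop initializers that every branch of the following if/else ladder overwrites, i.e. dead stores, assuming the ladder really does cover all paths as the patch implies. A sketch of that shape:

#include <stdio.h>

static double min_hratio_fact(int htaps)
{
        double fact;            /* no speculative `fact = 1.0;` needed */

        if (htaps <= 1)
                fact = 2.0;
        else if (htaps <= 6)
                fact = 1.5;     /* illustrative value */
        else
                fact = 1.0;     /* final else keeps every path defined */
        return fact;
}

int main(void)
{
        printf("%f\n", min_hratio_fact(4));
        return 0;
}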
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 0fc9f3e3ffae..f4bba1f2aeb6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -1157,11 +1157,6 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
- hscale_pixel_rate_l = 0.;
- hscale_pixel_rate_c = 0.;
- min_hratio_fact_l = 1.0;
- min_hratio_fact_c = 1.0;
-
if (htaps_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 57cf0358cc43..eb3ed965e48b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -1399,7 +1399,7 @@ static unsigned int CalculateVMAndRowBytes(
if (ScanDirection == dm_horz)
FractionOfPTEReturnDrop = 0;
else
- FractionOfPTEReturnDrop = 7 / 8;
+ FractionOfPTEReturnDrop = 7.0 / 8;
} else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 618f4b682ab1..c229a9edf82a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -297,9 +297,6 @@ static void handle_det_buf_split(
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
-
- if (req128_c && log2_swath_height_c > 0)
- log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
@@ -1208,11 +1205,6 @@ static void dml_rq_dlg_get_dlg_params(
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
- hscale_pixel_rate_l = 0.;
- hscale_pixel_rate_c = 0.;
- min_hratio_fact_l = 1.0;
- min_hratio_fact_c = 1.0;
-
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index e0b52db2c210..1c10ba4dcdde 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1783,7 +1783,7 @@ static unsigned int CalculateVMAndRowBytes(
if (ScanDirection != dm_vert)
FractionOfPTEReturnDrop = 0;
else
- FractionOfPTEReturnDrop = 7 / 8;
+ FractionOfPTEReturnDrop = 7.0 / 8;
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 0497a5d74a62..f3ee7baac786 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -1309,11 +1309,6 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
- hscale_pixel_rate_l = 0.;
- hscale_pixel_rate_c = 0.;
- min_hratio_fact_l = 1.0;
- min_hratio_fact_c = 1.0;
-
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 33cf824c5da1..0b132ce1d2cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1932,7 +1932,7 @@ static unsigned int CalculateVMAndRowBytes(
if (ScanDirection != dm_vert)
FractionOfPTEReturnDrop = 0;
else
- FractionOfPTEReturnDrop = 7 / 8;
+ FractionOfPTEReturnDrop = 7.0 / 8;
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
@@ -3617,7 +3617,7 @@ static double TruncToValidBPP(
NonDSCBPP1 = 15;
NonDSCBPP2 = 18;
MinDSCBPP = 6;
- MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16;
+ MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 4113ce79c4af..b6d954d9aa00 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -1150,11 +1150,6 @@ static void dml_rq_dlg_get_dlg_params(
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
- hscale_pixel_rate_l = 0.;
- hscale_pixel_rate_c = 0.;
- min_hratio_fact_l = 1.0;
- min_hratio_fact_c = 1.0;
-
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index f52b9e3d2bee..debfa31583a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1941,15 +1941,6 @@ static unsigned int CalculateVMAndRowBytes(
*PixelPTEReqWidth = 32768.0 / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
- } else if (MacroTileSizeBytes == 4096) {
- PixelPTEReqHeightPTEs = 1;
- *PixelPTEReqHeight = MacroTileHeight;
- *PixelPTEReqWidth = 8 * *MacroTileWidth;
- *PTERequestSize = 64;
- if (ScanDirection != dm_vert)
- FractionOfPTEReturnDrop = 0;
- else
- FractionOfPTEReturnDrop = 7 / 8;
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
@@ -3723,7 +3714,7 @@ static double TruncToValidBPP(
NonDSCBPP1 = 15;
NonDSCBPP2 = 18;
MinDSCBPP = 6;
- MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16;
+ MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index b3e8dc08030c..94975b0fa398 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -1238,11 +1238,6 @@ static void dml_rq_dlg_get_dlg_params(
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
- hscale_pixel_rate_l = 0.;
- hscale_pixel_rate_c = 0.;
- min_hratio_fact_l = 1.0;
- min_hratio_fact_c = 1.0;
-
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 194422dd979d..7abf8b88ca91 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -738,9 +738,9 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
/* Loop to calculate the maximum microschedule time between the two SubVP pipes,
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
- if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
+ phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
+ if (phantom && pipe->stream && pipe->plane_state && !pipe->top_pipe &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
- phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
phantom->timing.v_addressable;
@@ -845,8 +845,8 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
}
}
- if (subvp_found && drr_found) {
- phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
+ phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
+ if (phantom_stream && subvp_found && drr_found) {
main_timing = &pipe->stream->timing;
phantom_timing = &phantom_stream->timing;
drr_timing = &drr_pipe->stream->timing;
@@ -1177,6 +1177,9 @@ static void init_pipe_slice_table_from_context(
stream = context->streams[i];
otg_master = resource_get_otg_master_for_stream(
&context->res_ctx, stream);
+ if (!otg_master)
+ continue;
+
count = resource_get_odm_slice_count(otg_master);
update_slice_table_for_stream(table, stream, count);
@@ -2154,6 +2157,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+ for (i = 0; i < context->stream_count; i++)
+ resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
if (!pipe_cnt) {
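Both hunks above hoist the possibly-NULL dc_state_get_paired_subvp_stream() lookup ahead of the compound condition and fold the NULL test in, instead of dereferencing the result inside the branch. A sketch of the pattern with stand-in types:

#include <stdio.h>

struct stream { int v_total; };

static struct stream *get_paired(struct stream *s) { (void)s; return NULL; }

static int microschedule_lines(struct stream *main_stream)
{
        /* fetch first, test in the same condition that gates the deref */
        struct stream *phantom = get_paired(main_stream);

        if (phantom && main_stream->v_total > 0)
                return phantom->v_total;
        return 0;
}

int main(void)
{
        struct stream s = { 1080 };
        printf("%d\n", microschedule_lines(&s));   /* 0: nothing paired */
        return 0;
}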
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index ba1310c8fd77..d92fb428ee96 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1401,13 +1401,13 @@ void dml32_CalculateOutputLink(
if (Output == dm_dp2p0) {
*OutBpp = 0;
if ((OutputLinkDPRate == dm_dp_rate_na || OutputLinkDPRate == dm_dp_rate_uhbr10) &&
- PHYCLKD32PerState >= 10000 / 32) {
+ PHYCLKD32PerState >= 10000.0 / 32) {
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 10000,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat,
DSCInputBitPerComponent, NumberOfDSCSlices, AudioSampleRate,
AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLKD32PerState < 13500 / 32 && DSCEnable == true &&
+ if (*OutBpp == 0 && PHYCLKD32PerState < 13500.0 / 32 && DSCEnable == true &&
ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
@@ -1423,7 +1423,7 @@ void dml32_CalculateOutputLink(
*OutputRate = dm_output_rate_dp_rate_uhbr10;
}
if ((OutputLinkDPRate == dm_dp_rate_na || OutputLinkDPRate == dm_dp_rate_uhbr13p5) &&
- *OutBpp == 0 && PHYCLKD32PerState >= 13500 / 32) {
+ *OutBpp == 0 && PHYCLKD32PerState >= 13500.0 / 32) {
*OutBpp = dml32_TruncToValidBPP((1 - Downspreading / 100) * 13500,
OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd,
ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat,
@@ -1601,7 +1601,7 @@ double dml32_TruncToValidBPP(
NonDSCBPP1 = 15;
NonDSCBPP2 = 18;
MinDSCBPP = 6;
- MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16;
+ MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 60f251cf973b..beed7adbbd43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -177,7 +177,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 34.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index e4f333d4fb54..a201dbb743d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -215,7 +215,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
- .dram_clock_change_latency_us = 11.72,
+ .dram_clock_change_latency_us = 34,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 3df559c591f8..dae13f202220 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -1596,11 +1596,6 @@ void dml1_rq_dlg_get_dlg_params(
swath_width_pixels_ub_c = swath_width_ub_c * 1;
}
- hscale_pixel_rate_l = 0.;
- hscale_pixel_rate_c = 0.;
- min_hratio_fact_l = 1.0;
- min_hratio_fact_c = 1.0;
-
if (htaps_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index ff2adc9eab0d..547dfcc80fde 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -115,6 +115,7 @@ static void CalculateODMMode(
dml_float_t DISPCLKDPPCLKDSCCLKDownSpreading,
dml_float_t DISPCLKRampingMargin,
dml_float_t DISPCLKDPPCLKVCOSpeed,
+ dml_uint_t NumberOfDSCSlices,
// Output
dml_bool_t *TotalAvailablePipesSupport,
@@ -2732,7 +2733,7 @@ static dml_float_t TruncToValidBPP(
NonDSCBPP1 = 15;
NonDSCBPP2 = 18;
MinDSCBPP = 6;
- MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16;
+ MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dml_444) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
@@ -4282,7 +4283,7 @@ static void CalculateSwathAndDETConfiguration(struct display_mode_lib_scratch_st
}
*p->compbuf_reserved_space_64b = 2 * p->PixelChunkSizeInKByte * 1024 / 64;
- if (p->UnboundedRequestEnabled) {
+ if (*p->UnboundedRequestEnabled) {
*p->compbuf_reserved_space_64b = dml_max(*p->compbuf_reserved_space_64b,
(dml_float_t)(p->ROBBufferSizeInKByte * 1024/64)
- (dml_float_t)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / MAXIMUMCOMPRESSION/64));
@@ -5403,10 +5404,10 @@ static void CalculateOutputLink(
}
if (Output == dml_dp2p0) {
*OutBpp = 0;
- if ((OutputLinkDPRate == dml_dp_rate_na || OutputLinkDPRate == dml_dp_rate_uhbr10) && PHYCLKD32PerState >= 10000 / 32) {
+ if ((OutputLinkDPRate == dml_dp_rate_na || OutputLinkDPRate == dml_dp_rate_uhbr10) && PHYCLKD32PerState >= 10000 / 32.0) {
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLKD32PerState < 13500 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
+ if (*OutBpp == 0 && PHYCLKD32PerState < 13500 / 32.0 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
@@ -5416,7 +5417,7 @@ static void CalculateOutputLink(
*OutputType = dml_output_type_dp2p0;
*OutputRate = dml_output_rate_dp_rate_uhbr10;
}
- if ((OutputLinkDPRate == dml_dp_rate_na || OutputLinkDPRate == dml_dp_rate_uhbr13p5) && *OutBpp == 0 && PHYCLKD32PerState >= 13500 / 32) {
+ if ((OutputLinkDPRate == dml_dp_rate_na || OutputLinkDPRate == dml_dp_rate_uhbr13p5) && *OutBpp == 0 && PHYCLKD32PerState >= 13500 / 32.0) {
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
@@ -5516,6 +5517,7 @@ static void CalculateODMMode(
dml_float_t DISPCLKDPPCLKDSCCLKDownSpreading,
dml_float_t DISPCLKRampingMargin,
dml_float_t DISPCLKDPPCLKVCOSpeed,
+ dml_uint_t NumberOfDSCSlices,
// Output
dml_bool_t *TotalAvailablePipesSupport,
@@ -5563,7 +5565,7 @@ static void CalculateODMMode(
*NumberOfDPP = 0;
if (!(Output == dml_hdmi || Output == dml_dp || Output == dml_edp) && (ODMUse == dml_odm_use_policy_combine_4to1 || (ODMUse == dml_odm_use_policy_combine_as_needed &&
- (SurfaceRequiredDISPCLKWithODMCombineTwoToOne > StateDispclk || (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit)))))) {
+ (SurfaceRequiredDISPCLKWithODMCombineTwoToOne > StateDispclk || (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit)) || NumberOfDSCSlices > 8)))) {
if (TotalNumberOfActiveDPP + 4 <= MaxNumDPP) {
*ODMMode = dml_odm_mode_combine_4to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
@@ -5573,7 +5575,7 @@ static void CalculateODMMode(
}
} else if (Output != dml_hdmi && (ODMUse == dml_odm_use_policy_combine_2to1 || (ODMUse == dml_odm_use_policy_combine_as_needed &&
((SurfaceRequiredDISPCLKWithoutODMCombine > StateDispclk && SurfaceRequiredDISPCLKWithODMCombineTwoToOne <= StateDispclk) ||
- (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit)))))) {
+ (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit)) || (NumberOfDSCSlices <= 8 && NumberOfDSCSlices > 4))))) {
if (TotalNumberOfActiveDPP + 2 <= MaxNumDPP) {
*ODMMode = dml_odm_mode_combine_2to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
@@ -5880,11 +5882,11 @@ static dml_uint_t DSCDelayRequirement(
if (DSCEnabled == true && OutputBpp != 0) {
if (ODMMode == dml_odm_mode_combine_4to1) {
- DSCDelayRequirement_val = 4 * (dscceComputeDelay(DSCInputBitPerComponent, OutputBpp, (dml_uint_t)(dml_ceil((dml_float_t) HActive / (dml_float_t) NumberOfDSCSlices, 1.0)),
- (dml_uint_t) (NumberOfDSCSlices / 4.0), OutputFormat, Output) + dscComputeDelay(OutputFormat, Output));
+ DSCDelayRequirement_val = dscceComputeDelay(DSCInputBitPerComponent, OutputBpp, (dml_uint_t)(dml_ceil((dml_float_t) HActive / (dml_float_t) NumberOfDSCSlices, 1.0)),
+ (dml_uint_t) (NumberOfDSCSlices / 4.0), OutputFormat, Output) + dscComputeDelay(OutputFormat, Output);
} else if (ODMMode == dml_odm_mode_combine_2to1) {
- DSCDelayRequirement_val = 2 * (dscceComputeDelay(DSCInputBitPerComponent, OutputBpp, (dml_uint_t)(dml_ceil((dml_float_t) HActive / (dml_float_t) NumberOfDSCSlices, 1.0)),
- (dml_uint_t) (NumberOfDSCSlices / 2.0), OutputFormat, Output) + dscComputeDelay(OutputFormat, Output));
+ DSCDelayRequirement_val = dscceComputeDelay(DSCInputBitPerComponent, OutputBpp, (dml_uint_t)(dml_ceil((dml_float_t) HActive / (dml_float_t) NumberOfDSCSlices, 1.0)),
+ (dml_uint_t) (NumberOfDSCSlices / 2.0), OutputFormat, Output) + dscComputeDelay(OutputFormat, Output);
} else {
DSCDelayRequirement_val = dscceComputeDelay(DSCInputBitPerComponent, OutputBpp, (dml_uint_t)((dml_float_t) dml_ceil(HActive / (dml_float_t) NumberOfDSCSlices, 1.0)),
NumberOfDSCSlices, OutputFormat, Output) + dscComputeDelay(OutputFormat, Output);
@@ -6938,20 +6940,25 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
/*Number Of DSC Slices*/
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.cache_display_cfg.plane.BlendingAndTiming[k] == k) {
- if (mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] > 4800) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = (dml_uint_t)(dml_ceil(mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] / 600, 4));
- } else if (mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] > 2400) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 8;
- } else if (mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] > 1200) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 4;
- } else if (mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] > 340) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 2;
- } else {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 1;
+ if (mode_lib->ms.cache_display_cfg.plane.BlendingAndTiming[k] == k &&
+ mode_lib->ms.cache_display_cfg.output.DSCEnable[k] != dml_dsc_disable) {
+ mode_lib->ms.support.NumberOfDSCSlices[k] = mode_lib->ms.cache_display_cfg.output.DSCSlices[k];
+
+ if (mode_lib->ms.support.NumberOfDSCSlices[k] == 0) {
+ if (mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] > 4800) {
+ mode_lib->ms.support.NumberOfDSCSlices[k] = (dml_uint_t)(dml_ceil(mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] / 600, 4));
+ } else if (mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] > 2400) {
+ mode_lib->ms.support.NumberOfDSCSlices[k] = 8;
+ } else if (mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] > 1200) {
+ mode_lib->ms.support.NumberOfDSCSlices[k] = 4;
+ } else if (mode_lib->ms.cache_display_cfg.output.PixelClockBackEnd[k] > 340) {
+ mode_lib->ms.support.NumberOfDSCSlices[k] = 2;
+ } else {
+ mode_lib->ms.support.NumberOfDSCSlices[k] = 1;
+ }
}
} else {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 0;
+ mode_lib->ms.support.NumberOfDSCSlices[k] = 1;
}
}
@@ -7050,6 +7057,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
mode_lib->ms.soc.dcn_downspread_percent,
mode_lib->ms.ip.dispclk_ramp_margin_percent,
mode_lib->ms.soc.dispclk_dppclk_vco_speed_mhz,
+ mode_lib->ms.support.NumberOfDSCSlices[k],
/* Output */
&s->TotalAvailablePipesSupportNoDSC,
@@ -7072,6 +7080,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
mode_lib->ms.soc.dcn_downspread_percent,
mode_lib->ms.ip.dispclk_ramp_margin_percent,
mode_lib->ms.soc.dispclk_dppclk_vco_speed_mhz,
+ mode_lib->ms.support.NumberOfDSCSlices[k],
/* Output */
&s->TotalAvailablePipesSupportDSC,
@@ -9692,7 +9701,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ dml_max(1.0, dml_ceil((dml_float_t) locals->WritebackDelay[k] / ((dml_float_t) mode_lib->ms.cache_display_cfg.timing.HTotal[k] / mode_lib->ms.cache_display_cfg.timing.PixelClock[k]), 1.0))
+ dml_floor(4.0 * locals->TSetup[k] / ((dml_float_t) mode_lib->ms.cache_display_cfg.timing.HTotal[k] / mode_lib->ms.cache_display_cfg.timing.PixelClock[k]), 1.0) / 4.0;
- if (((locals->VUpdateOffsetPix[k] + locals->VUpdateWidthPix[k] + locals->VReadyOffsetPix[k]) / mode_lib->ms.cache_display_cfg.timing.HTotal[k]) <=
+ if (((locals->VUpdateOffsetPix[k] + locals->VUpdateWidthPix[k] + locals->VReadyOffsetPix[k]) / (double) mode_lib->ms.cache_display_cfg.timing.HTotal[k]) <=
(isInterlaceTiming ?
dml_floor((mode_lib->ms.cache_display_cfg.timing.VTotal[k] - mode_lib->ms.cache_display_cfg.timing.VActive[k] - mode_lib->ms.cache_display_cfg.timing.VFrontPorch[k] - locals->VStartup[k]) / 2.0, 1.0) :
(int) (mode_lib->ms.cache_display_cfg.timing.VTotal[k] - mode_lib->ms.cache_display_cfg.timing.VActive[k] - mode_lib->ms.cache_display_cfg.timing.VFrontPorch[k] - locals->VStartup[k]))) {
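Among the changes in this file, the `*p->UnboundedRequestEnabled` fix is the easiest to miss: the field is a pointer, so the old test checked that the pointer was non-NULL (nearly always true) instead of the boolean it points to. A compilable illustration:

#include <stdbool.h>
#include <stdio.h>

struct params { bool *flag; };

int main(void)
{
        bool enabled = false;
        struct params p = { .flag = &enabled };

        if (p.flag)        /* old form: tests the pointer, branch taken */
                puts("taken even though the value is false");
        if (*p.flag)       /* fixed form: tests the value */
                puts("never printed");
        return 0;
}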
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
index f595e48ae7b8..f951936bb579 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
@@ -575,6 +575,7 @@ struct dml_output_cfg_st {
dml_uint_t AudioSampleRate[__DML_NUM_PLANES__];
dml_uint_t AudioSampleLayout[__DML_NUM_PLANES__];
dml_bool_t OutputDisabled[__DML_NUM_PLANES__];
+ dml_uint_t DSCSlices[__DML_NUM_PLANES__];
}; // dml_output_cfg_st;
/// @brief Writeback Setting
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index a7d02da16bb5..d5ead0205053 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -102,9 +102,7 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table;
/* override clocks if smu is present */
- if (in_dc->clk_mgr &&
- in_dc->clk_mgr->funcs->is_smu_present &&
- in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) {
+ if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) {
/* dcfclk */
if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 8c9e95b25eb3..d276458e50fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -85,8 +85,8 @@ void find_pipe_regs_idx(const struct dml2_context *dml_ctx,
int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
struct dc_state *context,
struct dml2_context *dml_ctx,
- struct pipe_ctx **dc_main_pipes,
- struct pipe_ctx **dc_phantom_pipes,
+ struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__],
+ struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__],
int dml_plane_idx)
{
unsigned int dml_stream_index;
@@ -100,11 +100,16 @@ int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
struct dc_plane_state *dc_phantom_plane;
int num_pipes = 0;
+ memset(dc_main_pipes, 0, sizeof(struct pipe_ctx *) * __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ memset(dc_phantom_pipes, 0, sizeof(struct pipe_ctx *) * __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+
dml_stream_index = dml_ctx->v21.mode_programming.programming->plane_programming[dml_plane_idx].plane_descriptor->stream_index;
main_stream_id = dml_ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_stream_index];
dc_main_stream = dml_ctx->config.callbacks.get_stream_from_id(context, main_stream_id);
dc_main_stream_status = dml_ctx->config.callbacks.get_stream_status(context, dc_main_stream);
+ if (!dc_main_stream_status)
+ return num_pipes;
/* find main plane based on id */
dc_plane_index = dml21_get_dc_plane_idx_from_plane_id(dml_ctx->v21.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[dml_plane_idx]);
@@ -115,7 +120,8 @@ int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
} else {
/* stream was configured with dummy plane, so get pipes from opp head */
struct pipe_ctx *otg_master_pipe = dml_ctx->config.callbacks.get_otg_master_for_stream(&context->res_ctx, dc_main_stream);
- num_pipes = dml_ctx->config.callbacks.get_opp_heads_for_otg_master(otg_master_pipe, &context->res_ctx, dc_main_pipes);
+ if (otg_master_pipe != NULL)
+ num_pipes = dml_ctx->config.callbacks.get_opp_heads_for_otg_master(otg_master_pipe, &context->res_ctx, dc_main_pipes);
}
/* if phantom exists, find associated pipes */
@@ -123,10 +129,15 @@ int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
if (dc_phantom_stream && num_pipes > 0) {
dc_phantom_stream_status = dml_ctx->config.callbacks.get_stream_status(context, dc_phantom_stream);
- /* phantom plane will have same index as main */
- dc_phantom_plane = dc_phantom_stream_status->plane_states[dc_plane_index];
+ if (dc_phantom_stream_status) {
+ /* phantom plane will have same index as main */
+ dc_phantom_plane = dc_phantom_stream_status->plane_states[dc_plane_index];
- dml_ctx->config.callbacks.get_dpp_pipes_for_plane(dc_phantom_plane, &context->res_ctx, dc_phantom_pipes);
+ if (dc_phantom_plane) {
+ /* only care about phantom pipes if they contain the phantom plane */
+ dml_ctx->config.callbacks.get_dpp_pipes_for_plane(dc_phantom_plane, &context->res_ctx, dc_phantom_pipes);
+ }
+ }
}
return num_pipes;
@@ -317,6 +328,8 @@ static struct dc_stream_state *dml21_add_phantom_stream(struct dml2_context *dml
struct dml2_stream_parameters *phantom_stream_descriptor = &stream_programming->phantom_stream.descriptor;
phantom_stream = dml_ctx->config.svp_pstate.callbacks.create_phantom_stream(dc, context, main_stream);
+ if (!phantom_stream)
+ return NULL;
/* copy details of phantom stream from main */
memcpy(&phantom_stream->timing, &main_stream->timing, sizeof(phantom_stream->timing));
@@ -352,6 +365,8 @@ static struct dc_plane_state *dml21_add_phantom_plane(struct dml2_context *dml_c
struct dc_plane_state *phantom_plane;
phantom_plane = dml_ctx->config.svp_pstate.callbacks.create_phantom_plane(dc, context, main_plane);
+ if (!phantom_plane)
+ return NULL;
phantom_plane->format = main_plane->format;
phantom_plane->rotation = main_plane->rotation;
@@ -401,7 +416,7 @@ void dml21_handle_phantom_streams_planes(const struct dc *dc, struct dc_state *c
main_stream_status = dml_ctx->config.callbacks.get_stream_status(context, main_stream);
- if (main_stream_status->plane_count == 0)
+ if (!main_stream_status || main_stream_status->plane_count == 0)
continue;
/* create phantom stream for subvp enabled stream */
@@ -411,6 +426,9 @@ void dml21_handle_phantom_streams_planes(const struct dc *dc, struct dc_state *c
main_stream,
&dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_index]);
+ if (!phantom_stream)
+ continue;
+
/* iterate through DML planes associated with this stream */
for (dml_plane_index = 0; dml_plane_index < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_plane_index++) {
if (dml_ctx->v21.mode_programming.programming->plane_programming[dml_plane_index].plane_descriptor->stream_index == dml_stream_index) {
@@ -499,6 +517,9 @@ void dml21_build_fams2_programming(const struct dc *dc,
break;
case FAMS2_STREAM_TYPE_SUBVP:
phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, stream);
+ if (!phantom_stream)
+ break;
+
phantom_status = dml_ctx->config.callbacks.get_stream_status(context, phantom_stream);
/* phantom status should always be present */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
index 82080397a50e..d5153fbac921 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
@@ -33,8 +33,8 @@ void find_pipe_regs_idx(const struct dml2_context *dml_ctx,
int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
struct dc_state *context,
struct dml2_context *dml_ctx,
- struct pipe_ctx **dc_main_pipes,
- struct pipe_ctx **dc_phantom_pipes,
+ struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__],
+ struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__],
int dml_plane_idx);
void dml21_program_dc_pipe(struct dml2_context *dml_ctx,
struct dc_state *context,
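The new `struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__]` spelling documents the expected bound, but an array-typed parameter still decays to a pointer, which is why the memset in dml21_utils.c counts elements explicitly instead of taking sizeof on the parameter. A sketch of the decay pitfall:

#include <stdio.h>
#include <string.h>

#define N 6

static void clear(int *arr[N])
{
        /* inside the callee, sizeof(arr) == sizeof(int **), not N pointers */
        printf("sizeof inside callee: %zu\n", sizeof(arr));
        memset(arr, 0, sizeof(int *) * N);   /* correct: count elements */
}

int main(void)
{
        int *pipes[N];

        clear(pipes);
        printf("sizeof at call site: %zu\n", sizeof(pipes));
        return 0;
}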
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 08f001cb61c5..b442e1f9f204 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -156,7 +156,7 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
dml21_program_dc_pipe(in_ctx, context, dc_main_pipes[dc_pipe_index], pln_prog, stream_prog);
- if (pln_prog->phantom_plane.valid) {
+ if (pln_prog->phantom_plane.valid && dc_phantom_pipes[dc_pipe_index]) {
dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog);
}
}
@@ -325,7 +325,10 @@ void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context
}
/* get config for each phantom pipe */
- if (pln_prog->phantom_plane.valid) {
+ if (pln_prog->phantom_plane.valid &&
+ dc_phantom_pipes[0] &&
+ dc_main_pipes[0]->stream &&
+ dc_phantom_pipes[0]->plane_state) {
mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_phantom_prog_idx];
memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor));
mcache_config->plane_descriptor = pln_prog->plane_descriptor;
@@ -368,7 +371,10 @@ void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context
}
/* get config for each phantom pipe */
- if (pln_prog->phantom_plane.valid) {
+ if (pln_prog->phantom_plane.valid &&
+ dc_phantom_pipes[0] &&
+ dc_main_pipes[0]->stream &&
+ dc_phantom_pipes[0]->plane_state) {
for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) {
ASSERT(dc_phantom_pipes[dc_pipe_index]);
if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index]) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 4b8691c43523..04edcde423a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -235,7 +235,6 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters
phantom->timing.v_active = meta->v_active;
phantom->timing.v_front_porch = meta->v_front_porch;
phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
- phantom->timing.dsc.enable = dml2_dsc_disable;
phantom->timing.drr_config.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index e7e6751f4477..be73784e21eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -4202,10 +4202,10 @@ static void CalculateOutputLink(
}
if (Output == dml2_dp2p0) {
*OutBpp = 0;
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr10) && PHYCLKD32 >= 10000 / 32) {
+ if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr10) && PHYCLKD32 >= 10000.0 / 32) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLKD32 < 13500 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
+ if (*OutBpp == 0 && PHYCLKD32 < 13500.0 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
@@ -4215,11 +4215,11 @@ static void CalculateOutputLink(
*OutputType = dml2_core_internal_output_type_dp2p0;
*OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr10;
}
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr13p5) && *OutBpp == 0 && PHYCLKD32 >= 13500 / 32) {
+ if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr13p5) && *OutBpp == 0 && PHYCLKD32 >= 13500.0 / 32) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLKD32 < 20000 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
+ if (*OutBpp == 0 && PHYCLKD32 < 20000.0 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
@@ -4229,7 +4229,7 @@ static void CalculateOutputLink(
*OutputType = dml2_core_internal_output_type_dp2p0;
*OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr13p5;
}
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr20) && *OutBpp == 0 && PHYCLKD32 >= 20000 / 32) {
+ if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr20) && *OutBpp == 0 && PHYCLKD32 >= 20000.0 / 32) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 20000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
@@ -4306,33 +4306,33 @@ static void CalculateOutputLink(
*RequiresFEC = false;
}
*OutBpp = 0;
- if (PHYCLKD18 >= 3000 / 18) {
+ if (PHYCLKD18 >= 3000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 3000, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = Output & "3x3";
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_3x3;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 6000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 6000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 6000, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = Output & "6x3";
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_6x3;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 6000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 6000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 6000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = Output & "6x4";
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_6x4;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 8000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 8000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 8000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = Output & "8x4";
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_8x4;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 10000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 10000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 10000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0 && PHYCLKD18 < 12000 / 18) {
+ if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0 && PHYCLKD18 < 12000.0 / 18) {
*RequiresDSC = true;
LinkDSCEnable = true;
*RequiresFEC = true;
@@ -4342,7 +4342,7 @@ static void CalculateOutputLink(
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_10x4;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 12000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 12000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 12000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
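The recurring 10000 / 32 to 10000.0 / 32 changes in these hunks matter because both operands of the original expression are integer constants, so C performs truncating integer division before the comparison against the floating-point clock. A standalone illustration with a hypothetical clock value:

    #include <stdio.h>

    int main(void)
    {
        double phyclk_d32 = 312.2; /* hypothetical clock, MHz / 32 */

        /* 10000 / 32 is integer division: it truncates to 312. */
        printf("int threshold:   %d\n", phyclk_d32 >= 10000 / 32);   /* prints 1 */
        /* 10000.0 / 32 keeps the intended 312.5 threshold. */
        printf("float threshold: %d\n", phyclk_d32 >= 10000.0 / 32); /* prints 0 */
        return 0;
    }

The same fix pattern repeats for the PHYCLKD18 HDMI FRL thresholds and the writeback buffer-size check below.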
@@ -4501,24 +4501,6 @@ static void CalculateSurfaceSizeInMall(
math_floor2((double)composition->viewport.plane1.y_start + composition->viewport.plane1.height + ReadBlockHeightC[k] - 1, ReadBlockHeightC[k]) -
math_floor2(composition->viewport.plane1.y_start, ReadBlockHeightC[k])) * BytesPerPixelC[k]);
}
- if (surface->dcc.enable) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_min2(math_ceil2(surface->plane0.width, 8 * Read256BytesBlockWidthY[k]),
- math_floor2(composition->viewport.plane0.x_start + composition->viewport.plane0.width + 8 * Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k]) -
- math_floor2(composition->viewport.plane0.x_start, 8 * Read256BytesBlockWidthY[k])) *
- math_min2(math_ceil2(surface->plane0.height, 8 * Read256BytesBlockHeightY[k]),
- math_floor2(composition->viewport.plane0.y_start + composition->viewport.plane0.height + 8 * Read256BytesBlockHeightY[k] - 1, 8 * Read256BytesBlockHeightY[k]) -
- math_floor2(composition->viewport.plane0.y_start, 8 * Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024);
- if (Read256BytesBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_min2(math_ceil2(surface->plane1.width, 8 * Read256BytesBlockWidthC[k]),
- math_floor2(composition->viewport.plane1.y_start + composition->viewport.plane1.width + 8 * Read256BytesBlockWidthC[k] - 1, 8 * Read256BytesBlockWidthC[k]) -
- math_floor2(composition->viewport.plane1.y_start, 8 * Read256BytesBlockWidthC[k])) *
- math_min2(math_ceil2(surface->plane1.height, 8 * Read256BytesBlockHeightC[k]),
- math_floor2(composition->viewport.plane1.y_start + composition->viewport.plane1.height + 8 * Read256BytesBlockHeightC[k] - 1, 8 * Read256BytesBlockHeightC[k]) -
- math_floor2(composition->viewport.plane1.y_start, 8 * Read256BytesBlockHeightC[k])) * BytesPerPixelC[k] / 256);
- }
- }
} else {
SurfaceSizeInMALL[k] = (unsigned int)(math_ceil2(math_min2(surface->plane0.width, composition->viewport.plane0.width + ReadBlockWidthY[k] - 1), ReadBlockWidthY[k]) *
math_ceil2(math_min2(surface->plane0.height, composition->viewport.plane0.height + ReadBlockHeightY[k] - 1), ReadBlockHeightY[k]) * BytesPerPixelY[k]);
@@ -4527,17 +4509,6 @@ static void CalculateSurfaceSizeInMall(
math_ceil2(math_min2(surface->plane1.width, composition->viewport.plane1.width + ReadBlockWidthC[k] - 1), ReadBlockWidthC[k]) *
math_ceil2(math_min2(surface->plane1.height, composition->viewport.plane1.height + ReadBlockHeightC[k] - 1), ReadBlockHeightC[k]) * BytesPerPixelC[k]);
}
- if (surface->dcc.enable) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_ceil2(math_min2(surface->plane0.width, composition->viewport.plane0.width + 8 * Read256BytesBlockWidthY[k] - 1), 8 * Read256BytesBlockWidthY[k]) *
- math_ceil2(math_min2(surface->plane0.height, composition->viewport.plane0.height + 8 * Read256BytesBlockHeightY[k] - 1), 8 * Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024);
-
- if (Read256BytesBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_ceil2(math_min2(surface->plane1.width, composition->viewport.plane1.width + 8 * Read256BytesBlockWidthC[k] - 1), 8 * Read256BytesBlockWidthC[k]) *
- math_ceil2(math_min2(surface->plane1.height, composition->viewport.plane1.height + 8 * Read256BytesBlockHeightC[k] - 1), 8 * Read256BytesBlockHeightC[k]) * BytesPerPixelC[k] / 256);
- }
- }
}
}
@@ -7155,7 +7126,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
+ (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024.0 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
mode_lib->ms.support.WritebackLatencySupport = false;
}
}
@@ -7742,7 +7713,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = false;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl &&
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) {
mode_lib->ms.RequiredDTBCLK[k] = RequiredDTBCLK(
mode_lib->ms.RequiresDSC[k],
s->PixelClockBackEnd[k],
@@ -7757,6 +7729,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_clocks_khz.dtbclk / 1000)) {
mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true;
}
+ } else {
+ /* Phantom DTBCLK can be calculated differently from the main pipe's because a
+ * phantom has no DSC and thus a different output BPP. Ignore the phantom DTBCLK
+ * requirement and only consider non-phantom requirements. In map_mode_to_soc_dpm
+ * we choose the highest DTBCLK required; setting the phantom dtbclk to 0 excludes
+ * it from that selection.
+ */
+ mode_lib->ms.RequiredDTBCLK[k] = 0;
}
}
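As the comment explains, the zeroed entry drops out naturally when the highest per-plane requirement is selected later. A sketch of that selection step, assuming the max-reduction shape of map_mode_to_soc_dpm (not the actual code):

    /* Pick the highest per-plane DTBCLK requirement; phantom planes are
     * zeroed out earlier, so they can never win the max. */
    static double max_required_dtbclk(const double *required_khz, int num_planes)
    {
        double max_khz = 0.0;

        for (int i = 0; i < num_planes; i++)
            if (required_khz[i] > max_khz)
                max_khz = required_khz[i];
        return max_khz;
    }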
@@ -8940,10 +8919,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
} // prefetch schedule
}
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.use_one_row_for_frame[k] = mode_lib->ms.use_one_row_for_frame[k];
- }
-
s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
@@ -11375,7 +11350,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
// debug only
- if (((mode_lib->mp.VUpdateOffsetPix[k] + mode_lib->mp.VUpdateWidthPix[k] + mode_lib->mp.VReadyOffsetPix[k]) / display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) <=
+ if (((mode_lib->mp.VUpdateOffsetPix[k] + mode_lib->mp.VUpdateWidthPix[k] + mode_lib->mp.VReadyOffsetPix[k]) / (double) display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) <=
(isInterlaceTiming ?
math_floor2((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch - mode_lib->mp.VStartup[k]) / 2.0, 1.0) :
(int)(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch - mode_lib->mp.VStartup[k]))) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
index 6eb3fec87ec1..0099e58e0b1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
@@ -942,7 +942,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
mode_lib->ms.support.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
+ (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024.0 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
mode_lib->ms.support.WritebackLatencySupport = false;
}
}
@@ -2647,10 +2647,6 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
} // prefetch schedule
}
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.use_one_row_for_frame[k] = mode_lib->ms.use_one_row_for_frame[k];
- }
-
s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
@@ -6735,10 +6731,10 @@ static void CalculateOutputLink(
}
if (Output == dml2_dp2p0) {
*OutBpp = 0;
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr10) && PHYCLKD32 >= 10000 / 32) {
+ if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr10) && PHYCLKD32 >= 10000.0 / 32) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLKD32 < 13500 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
+ if (*OutBpp == 0 && PHYCLKD32 < 13500.0 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
@@ -6748,7 +6744,7 @@ static void CalculateOutputLink(
*OutputType = dml2_core_internal_output_type_dp2p0;
*OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr10;
}
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr13p5) && *OutBpp == 0 && PHYCLKD32 >= 13500 / 32) {
+ if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr13p5) && *OutBpp == 0 && PHYCLKD32 >= 13500.0 / 32) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
@@ -6839,33 +6835,33 @@ static void CalculateOutputLink(
*RequiresFEC = false;
}
*OutBpp = 0;
- if (PHYCLKD18 >= 3000 / 18) {
+ if (PHYCLKD18 >= 3000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 3000, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = Output & "3x3";
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_3x3;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 6000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 6000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 6000, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = Output & "6x3";
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_6x3;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 6000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 6000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 6000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = Output & "6x4";
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_6x4;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 8000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 8000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 8000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
//OutputTypeAndRate = Output & "8x4";
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_8x4;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 10000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 10000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 10000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0 && PHYCLKD18 < 12000 / 18) {
+ if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0 && PHYCLKD18 < 12000.0 / 18) {
*RequiresDSC = true;
LinkDSCEnable = true;
*RequiresFEC = true;
@@ -6875,7 +6871,7 @@ static void CalculateOutputLink(
*OutputType = dml2_core_internal_output_type_hdmifrl;
*OutputRate = dml2_core_internal_output_rate_hdmi_rate_10x4;
}
- if (*OutBpp == 0 && PHYCLKD18 >= 12000 / 18) {
+ if (*OutBpp == 0 && PHYCLKD18 >= 12000.0 / 18) {
*OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 12000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
@@ -11240,7 +11236,7 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
// debug only
- if (((mode_lib->mp.VUpdateOffsetPix[k] + mode_lib->mp.VUpdateWidthPix[k] + mode_lib->mp.VReadyOffsetPix[k]) / display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) <=
+ if (((mode_lib->mp.VUpdateOffsetPix[k] + mode_lib->mp.VUpdateWidthPix[k] + (double) mode_lib->mp.VReadyOffsetPix[k]) / display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) <=
(isInterlaceTiming ?
math_floor2((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch - mode_lib->mp.VStartup[k]) / 2.0, 1.0) :
(int)(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch - mode_lib->mp.VStartup[k]))) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 85c64dcefa82..00fedc00a735 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -929,7 +929,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
/* check recout height covers entire otg vactive, and single plane */
if (num_planes_per_stream[plane_descriptor->stream_index] > 1 ||
- !plane_descriptor->composition.rect_out_height_spans_vactive) {
+ !plane_descriptor->composition.rect_out_height_spans_vactive ||
+ plane_descriptor->composition.rotation_angle != dml2_rotation_0) {
return false;
}
}
@@ -1081,12 +1082,17 @@ static bool is_timing_group_schedulable(
/* init allow start and end lines for timing group */
stream_method_fams2_meta = get_per_method_common_meta(pmo, per_stream_pstate_strategy[base_stream_idx], base_stream_idx);
+ if (!stream_method_fams2_meta)
+ return false;
+
group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
stream_method_fams2_meta = get_per_method_common_meta(pmo, per_stream_pstate_strategy[i], i);
+ if (!stream_method_fams2_meta)
+ continue;
if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
@@ -1361,7 +1367,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
break;
}
- strategy_matches_drr_requirements =
+ strategy_matches_drr_requirements &=
stream_matches_drr_policy(pmo, display_cfg, per_stream_pstate_strategy[stream_index], stream_index);
if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
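The switch from = to &= is the functional part of this pmo hunk: inside the stream loop, plain assignment let the last stream overwrite earlier results, while &= ANDs the per-stream checks so any one failing stream fails the whole strategy. A reduced sketch with a hypothetical policy predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical per-stream policy check: stream 2 fails. */
    static bool stream_ok(int i)
    {
        return i != 2;
    }

    int main(void)
    {
        bool matches = true;

        for (int i = 0; i < 4; i++) {
            /* '=' would keep only the last stream's result (true for
             * stream 3); '&=' accumulates, so one failure sticks. */
            matches &= stream_ok(i);
        }
        printf("%d\n", matches); /* prints 0 */
        return 0;
    }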
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
index 1142fdade334..6f334fdc6eb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
@@ -259,7 +259,7 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
/*
* Phase 5: Optimize for Stutter
*/
- memset(&l->vmin_phase, 0, sizeof(struct optimization_phase_params));
+ memset(&l->stutter_phase, 0, sizeof(struct optimization_phase_params));
l->stutter_phase.dml = dml;
l->stutter_phase.display_config = &l->base_display_config_with_meta;
l->stutter_phase.init_function = dml2_top_optimization_init_function_stutter;
@@ -272,7 +272,7 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
if (stutter_success) {
memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage4.success = true;
+ l->base_display_config_with_meta.stage5.success = true;
}
/*
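Both dml_top.c fixes correct copy-paste slips in the stutter optimization phase: the memset was clearing l->vmin_phase, which the previous phase had already set up, instead of l->stutter_phase, and success was recorded in stage4 rather than stage5. A generic, runnable sketch of the intended per-phase pattern (types and field names assumed, not the DML structs):

    #include <string.h>
    #include <stdbool.h>

    struct phase_params { int phase_id; bool success; };

    struct locals {
        struct phase_params vmin_phase;    /* stage 4 */
        struct phase_params stutter_phase; /* stage 5 */
    };

    /* Zero the descriptor that belongs to *this* phase; clearing a
     * sibling (the original slip) wipes state another phase still owns. */
    static void begin_stutter_phase(struct locals *l)
    {
        memset(&l->stutter_phase, 0, sizeof(l->stutter_phase));
        l->stutter_phase.phase_id = 5;
    }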
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 02fb2cb1c43c..6eccf0241d85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -68,7 +68,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state
if (state->streams[i]->stream_id == stream_id) {
for (j = 0; j < state->stream_status[i].plane_count; j++) {
if (state->stream_status[i].plane_states[j] == plane &&
- (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
+ (!is_plane_duplicate || (j == plane_index))) {
*plane_id = (i << 16) | j;
return true;
}
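This get_plane_id() cleanup, and the similar ones in the hunks that follow, rely on the boolean identity !a || (a && b) == !a || b: when is_plane_duplicate is false the disjunction is already true, and when it is true the redundant inner test adds nothing. A quick truth-table check:

    #include <assert.h>

    int main(void)
    {
        for (int a = 0; a <= 1; a++)
            for (int b = 0; b <= 1; b++)
                assert((!a || (a && b)) == (!a || b));
        return 0;
    }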
@@ -707,8 +707,8 @@ static void free_unused_pipes_for_plane(struct dml2_context *ctx, struct dc_stat
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
if (state->res_ctx.pipe_ctx[i].plane_state == plane &&
state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id &&
- (!is_plane_duplicate || (is_plane_duplicate &&
- ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx] == plane_index)) &&
+ (!is_plane_duplicate ||
+ ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx] == plane_index) &&
!is_pipe_used(pool, state->res_ctx.pipe_ctx[i].pipe_idx)) {
free_pipe(&state->res_ctx.pipe_ctx[i]);
}
@@ -874,13 +874,14 @@ static unsigned int get_target_odm_factor(
default:
break;
}
- }
- else if (ctx->architecture == dml2_architecture_21) {
+ } else if (ctx->architecture == dml2_architecture_21) {
if (ctx->config.svp_pstate.callbacks.get_stream_subvp_type(state, stream) == SUBVP_PHANTOM) {
struct dc_stream_state *main_stream;
/* get stream id of main stream */
main_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(state, stream);
+ if (!main_stream)
+ goto failed;
/* get cfg idx for associated main stream */
cfg_idx = find_disp_cfg_idx_by_stream_id(
@@ -893,6 +894,7 @@ static unsigned int get_target_odm_factor(
return ctx->v21.mode_programming.programming->stream_programming[cfg_idx].num_odms_required;
}
+failed:
ASSERT(false);
return 1;
}
@@ -903,6 +905,9 @@ static unsigned int get_source_odm_factor(const struct dml2_context *ctx,
{
struct pipe_ctx *otg_master = ctx->config.callbacks.get_otg_master_for_stream(&state->res_ctx, stream);
+ if (!otg_master)
+ return 0;
+
return ctx->config.callbacks.get_odm_slice_count(otg_master);
}
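Two more defensive checks land in the resource-management helpers above: get_paired_subvp_stream() and get_otg_master_for_stream() can both return NULL, so the ODM-factor lookups now bail out (via the new failed: label, or by returning 0 slices) instead of dereferencing the result. The error-label form, sketched generically with a stub callback:

    #include <stddef.h>

    struct stream;

    /* Stub standing in for get_paired_subvp_stream(); may return NULL. */
    static struct stream *get_paired_stream(struct stream *s)
    {
        (void)s;
        return NULL;
    }

    static unsigned int target_odm_factor(struct stream *s)
    {
        struct stream *main_stream = get_paired_stream(s);

        if (!main_stream)
            goto failed;
        /* ... look up the config index for main_stream here ... */
        return 2; /* hypothetical ODM factor */
    failed:
        return 1; /* safe default, as on the hunk's ASSERT path */
    }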
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index c04ebf5434c9..935893456849 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -739,6 +739,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
out->DSCEnable[location] = (enum dml_dsc_enable)in->timing.flags.DSC;
out->OutputLinkDPLanes[location] = 4; // As per code in dcn20_resource.c
out->DSCInputBitPerComponent[location] = 12; // As per code in dcn20_resource.c
+ out->DSCSlices[location] = in->timing.dsc_cfg.num_slices_h;
switch (in->signal) {
case SIGNAL_TYPE_DISPLAY_PORT_MST:
@@ -1109,7 +1110,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *conte
if (context->streams[i]->stream_id == stream_id) {
for (j = 0; j < context->stream_status[i].plane_count; j++) {
if (context->stream_status[i].plane_states[j] == plane &&
- (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
+ (!is_plane_duplicate || (j == plane_index))) {
*plane_id = (i << 16) | j;
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 17cc2fdd7d34..f6310408dbba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -573,10 +573,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
bool need_recalculation = false;
uint32_t cstate_enter_plus_exit_z8_ns;
- if (!context)
- return true;
-
- else if (context->stream_count == 0) {
+ if (context->stream_count == 0) {
unsigned int lowest_state_idx = 0;
out_clks.p_state_supported = true;
@@ -740,6 +737,7 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
dml21_reinit(in_dc, dml2, config);
+ return;
}
// Store config options
@@ -841,9 +839,10 @@ void dml2_reinit(const struct dc *in_dc,
struct dml2_context **dml2)
{
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
- dml21_reinit(in_dc, dml2, config);
- }
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ dml21_reinit(in_dc, dml2, config);
+ return;
+ }
dml2_init(in_dc, config, dml2);
}
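With these wrapper changes, dml2_init() and dml2_reinit() return immediately after delegating to dml21_reinit(); previously they fell through and ran the legacy DML2 setup on top of the DML2.1 state. The guard-and-return shape in miniature, with the version check simplified to a flag:

    #include <stdbool.h>
    #include <stdio.h>

    static void init_v21(void) { puts("dml2.1 init"); }
    static void init_v20(void) { puts("legacy dml2 init"); }

    /* Without the early return, both init paths would run back to back. */
    static void dml_init(bool use_v21)
    {
        if (use_v21) {
            init_v21();
            return;
        }
        init_v20();
    }

    int main(void)
    {
        dml_init(true); /* prints only "dml2.1 init" */
        return 0;
    }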
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index f8c0cee34080..40acebd13e46 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -64,22 +64,23 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
}
// Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
- REG_GET(CM_SHAPER_CONTROL,
- CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
- REG_GET(CM_3DLUT_MODE,
- CM_3DLUT_MODE_CURRENT, &s->lut3d_mode);
- REG_GET(CM_3DLUT_READ_WRITE_CONTROL,
- CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
- REG_GET(CM_3DLUT_MODE,
- CM_3DLUT_SIZE, &s->lut3d_size);
+ if (REG(CM_SHAPER_CONTROL))
+ REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+ if (REG(CM_3DLUT_MODE))
+ REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+ if (REG(CM_3DLUT_READ_WRITE_CONTROL))
+ REG_GET(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+ if (REG(CM_3DLUT_MODE))
+ REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &s->lut3d_size);
// Blend/Out Gamma (RAM)
- REG_GET(CM_BLNDGAM_CONTROL,
- CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode);
- if (s->rgam_lut_mode){
- REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode);
- if (!rgam_lut_mode)
- s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+ if (REG(CM_BLNDGAM_CONTROL)) {
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode);
+ if (s->rgam_lut_mode) {
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode);
+ if (!rgam_lut_mode)
+ s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+ }
}
}
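In dpp30_read_state(), each CM block read is now gated on the register actually being present. The sketch below assumes REG() resolves to a register offset where 0 means "unmapped on this ASIC", which is how the guard reads; all names here are hypothetical:

    #include <stdint.h>

    /* Register-list entry: 0 marks "not mapped" in this sketch. */
    struct dpp_regs {
        uint32_t cm_shaper_control;
    };

    static uint32_t mmio_read(uint32_t offset)
    {
        return offset ? 0x2u : 0u; /* stub for a real MMIO read */
    }

    static void read_shaper_state(const struct dpp_regs *r, uint32_t *lut_mode)
    {
        /* Guard the read: touching an unmapped register offset is the
         * failure mode the hunk above avoids. */
        if (r->cm_shaper_control)
            *lut_mode = mmio_read(r->cm_shaper_control);
    }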
@@ -218,7 +219,6 @@ void dpp3_cnv_setup (
uint32_t alpha_plane_enable = 0;
uint32_t dealpha_en = 0, dealpha_ablnd_en = 0;
uint32_t realpha_en = 0, realpha_ablnd_en = 0;
- uint32_t program_prealpha_dealpha = 0;
struct out_csc_color_matrix tbl_entry;
int i;
@@ -346,10 +346,6 @@ void dpp3_cnv_setup (
CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
- if (program_prealpha_dealpha) {
- dealpha_en = 1;
- realpha_en = 1;
- }
REG_SET_2(PRE_DEALPHA, 0,
PRE_DEALPHA_EN, dealpha_en,
PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index eee64d8e1013..7cae18fd7be9 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -68,7 +68,6 @@ void dpp401_dpp_setup(
uint32_t alpha_plane_enable = 0;
uint32_t dealpha_en = 0, dealpha_ablnd_en = 0;
uint32_t realpha_en = 0, realpha_ablnd_en = 0;
- uint32_t program_prealpha_dealpha = 0;
struct out_csc_color_matrix tbl_entry;
int i;
@@ -192,10 +191,6 @@ void dpp401_dpp_setup(
CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
- if (program_prealpha_dealpha) {
- dealpha_en = 1;
- realpha_en = 1;
- }
REG_SET_2(PRE_DEALPHA, 0,
PRE_DEALPHA_EN, dealpha_en,
PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en);
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/Makefile b/drivers/gpu/drm/amd/display/dc/dwb/Makefile
new file mode 100644
index 000000000000..16f7a454fed9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dwb/Makefile
@@ -0,0 +1,37 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN35
+###############################################################################
+DWB_DCN35 = dcn35_dwb.o
+
+AMD_DAL_DWB_DCN35 = $(addprefix $(AMDDALPATH)/dc/dwb/dcn35/,$(DWB_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DWB_DCN35)
+
+
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c
index b23a809999ed..b23a809999ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.h
index 886e727ed080..886e727ed080 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.h
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 525bc8881950..d9e6e70dc394 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -170,8 +170,7 @@ static enum gpio_result set_config(
return GPIO_RESULT_OK;
case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
- if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
- (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ if (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA) {
REG_UPDATE_3(ddc_setup,
DC_I2C_DDC1_ENABLE, 1,
DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
@@ -180,8 +179,7 @@ static enum gpio_result set_config(
}
break;
case GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT:
- if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
- (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ if (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA) {
REG_UPDATE_3(ddc_setup,
DC_I2C_DDC1_ENABLE, 1,
DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
@@ -190,8 +188,7 @@ static enum gpio_result set_config(
}
break;
case GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING:
- if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
- (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ if (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA) {
REG_UPDATE_2(ddc_setup,
DC_I2C_DDC1_ENABLE, 0,
DC_I2C_DDC1_EDID_DETECT_ENABLE, 0);
@@ -231,7 +228,7 @@ void dal_hw_ddc_init(
enum gpio_id id,
uint32_t en)
{
- if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
+ if (en > GPIO_DDC_LINE_MAX) {
ASSERT_CRITICAL(false);
*hw_ddc = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
index f9e847e6555d..6cd50232c432 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c
@@ -106,7 +106,7 @@ void dal_hw_generic_init(
enum gpio_id id,
uint32_t en)
{
- if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
+ if (en > GPIO_DDC_LINE_MAX) {
ASSERT_CRITICAL(false);
*hw_generic = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
index 1489fdfaf0e7..3f13a744d07d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -127,7 +127,7 @@ void dal_hw_hpd_init(
enum gpio_id id,
uint32_t en)
{
- if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
+ if (en > GPIO_DDC_LINE_MAX) {
ASSERT_CRITICAL(false);
*hw_hpd = NULL;
}
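The three dal_hw_*_init() hunks, like the set_config() cases above them, drop the lower-bound half of the range checks. en is a uint32_t, so if GPIO_DDC_LINE_MIN and GPIO_DDC_LINE_DDC1 are the lowest enumerators (presumably 0), the lower-bound test can never be true and compilers flag it as a tautological comparison. Reduced to its essence, with assumed enum values:

    #include <stdint.h>
    #include <stdbool.h>

    #define LINE_MIN 0u /* assumed: first enumerator of the DDC line enum */
    #define LINE_MAX 7u /* hypothetical upper bound */

    static bool line_out_of_range(uint32_t en)
    {
        /* 'en < LINE_MIN' is always false for unsigned en when
         * LINE_MIN == 0, so only the upper bound is meaningful. */
        return en > LINE_MAX;
    }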
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/Makefile b/drivers/gpu/drm/amd/display/dc/hpo/Makefile
new file mode 100644
index 000000000000..c248bd86b477
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hpo/Makefile
@@ -0,0 +1,35 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN32
+###############################################################################
+HPO_DCN32 = dcn32_hpo_dp_link_encoder.o
+
+AMD_DAL_HPO_DCN32 = $(addprefix $(AMDDALPATH)/dc/hpo/dcn32/,$(HPO_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HPO_DCN32)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c
index 8af01f579690..8af01f579690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
index 176b1537d2a1..176b1537d2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index f489371a3bc6..1b410aff6c56 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -742,12 +742,10 @@ void dce110_edp_wait_for_hpd_ready(
return;
}
- if (link != NULL) {
- if (link->panel_config.pps.extra_t3_ms > 0) {
- int extra_t3_in_ms = link->panel_config.pps.extra_t3_ms;
+ if (link->panel_config.pps.extra_t3_ms > 0) {
+ int extra_t3_in_ms = link->panel_config.pps.extra_t3_ms;
- msleep(extra_t3_in_ms);
- }
+ msleep(extra_t3_in_ms);
}
dal_gpio_open(hpd, GPIO_MODE_INTERRUPT);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index de6ee6bf0a88..35151dd056cb 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -289,6 +289,7 @@ static void dcn10_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
+ bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
@@ -301,16 +302,15 @@ static void dcn10_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
- dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ if (dpp->funcs->dpp_get_gamut_remap) {
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ is_gamut_remap_available = true;
+ }
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %11xh %11s %9s %9s"
- " %12s "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld",
+ DTN_INFO("[%2d]: %11xh %11s %9s %9s",
dpp->inst,
s.igam_input_format,
(s.igam_lut_mode == 0) ? "BypassFixed" :
@@ -329,22 +329,27 @@ static void dcn10_log_color_state(struct dc *dc,
((s.rgam_lut_mode == 2) ? "Ycc" :
((s.rgam_lut_mode == 3) ? "RAM" :
((s.rgam_lut_mode == 4) ? "RAM" :
- "Unknown")))),
- (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
- ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
- "SW"),
- s.gamut_remap.temperature_matrix[0].value,
- s.gamut_remap.temperature_matrix[1].value,
- s.gamut_remap.temperature_matrix[2].value,
- s.gamut_remap.temperature_matrix[3].value,
- s.gamut_remap.temperature_matrix[4].value,
- s.gamut_remap.temperature_matrix[5].value,
- s.gamut_remap.temperature_matrix[6].value,
- s.gamut_remap.temperature_matrix[7].value,
- s.gamut_remap.temperature_matrix[8].value,
- s.gamut_remap.temperature_matrix[9].value,
- s.gamut_remap.temperature_matrix[10].value,
- s.gamut_remap.temperature_matrix[11].value);
+ "Unknown")))));
+ if (is_gamut_remap_available)
+ DTN_INFO(" %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+
DTN_INFO("\n");
}
DTN_INFO("\n");
@@ -1255,7 +1260,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc,
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
// so don't wait for MPCC_IDLE in the programming sequence
- if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
+ if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 0d58c9d439c6..82d1ded09561 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -2051,11 +2051,14 @@ void dcn20_program_front_end_for_ctx(
if (tg->funcs->enable_crtc) {
if (dc->hwss.blank_phantom) {
- int main_pipe_width, main_pipe_height;
+ int main_pipe_width = 0, main_pipe_height = 0;
struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream);
- main_pipe_width = phantom_stream->dst.width;
- main_pipe_height = phantom_stream->dst.height;
+ if (phantom_stream) {
+ main_pipe_width = phantom_stream->dst.width;
+ main_pipe_height = phantom_stream->dst.height;
+ }
+
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
}
tg->funcs->enable_crtc(tg);
@@ -2800,6 +2803,11 @@ void dcn20_reset_back_end_for_pipe(
if (i == dc->res_pool->pipe_count)
return;
+/*
+ * In the dangling-plane case, setting this to NULL unconditionally
+ * causes failures while resetting the hw ctx: when stream is NULL,
+ * the pipe_ctx pointers to pipes and plane are expected to be NULL.
+ */
pipe_ctx->stream = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
index 86d871cc74c7..1635e5a552ad 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
@@ -240,7 +240,7 @@ void dcn201_init_hw(struct dc *dc)
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (res_pool->dccg && res_pool->hubbub) {
+ if (res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
@@ -408,8 +408,7 @@ void dcn201_plane_atomic_disconnect(struct dc *dc,
if (mpcc_removed == false)
return;
- if (opp != NULL)
- opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 804be977ea47..3de65a9f0e6f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -142,7 +142,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
{
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
- uint32_t ramping_boundary = 0xFFFF;
+ uint8_t ramping_boundary = 0xFF;
memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
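This dcn21 hunk shrinks ramping_boundary from a uint32_t 0xFFFF to a uint8_t 0xFF so the local matches the width of the DMUB command field it feeds (the 8-bit field width is assumed here); with the wider type, the assignment would truncate silently:

    #include <stdint.h>
    #include <stdio.h>

    struct abm_cmd { uint8_t ramping_boundary; }; /* assumed 8-bit field */

    int main(void)
    {
        struct abm_cmd cmd;
        uint32_t wide = 0xFFFF;

        cmd.ramping_boundary = wide; /* silently truncates to 0xFF */
        printf("0x%X\n", cmd.ramping_boundary); /* prints 0xFF */
        return 0;
    }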
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 4c4706153305..bcacfd893cf7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -656,7 +656,7 @@ void dcn30_init_hw(struct dc *dc)
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (res_pool->dccg && res_pool->hubbub) {
+ if (res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 1c8abb417b6e..746c522adf84 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -132,7 +132,7 @@ void dcn31_init_hw(struct dc *dc)
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (res_pool->dccg && res_pool->hubbub) {
+ if (res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 8e68e05e3b72..388404cdeeaa 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -379,8 +379,25 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (otg_disabled[i])
+ if (otg_disabled[i]) {
+ int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
+ int opp_cnt = 1;
+ int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe, true);
+ int odm_slice_width = resource_get_odm_slice_dst_width(pipe, false);
+ struct pipe_ctx *odm_pipe;
+
+ for (odm_pipe = pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
+ opp_cnt++;
+ }
+ if (opp_cnt > 1)
+ pipe->stream_res.tg->funcs->set_odm_combine(
+ pipe->stream_res.tg,
+ opp_inst, opp_cnt,
+ odm_slice_width,
+ last_odm_slice_width);
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
}
}
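This dcn314 hunk (mirrored for dcn32 further below) re-applies ODM combine before re-enabling a previously disabled OTG: the OPP instance of every pipe in the ODM chain is collected and, when more than one is found, set_odm_combine() is programmed ahead of enable_crtc(). The chain walk, isolated with types trimmed to what the loop touches:

    #define MAX_PIPES 6

    struct pipe_ctx {
        struct pipe_ctx *next_odm_pipe;
        int opp_inst;
    };

    /* Walk the ODM chain rooted at 'pipe' and record each OPP instance;
     * assumes, as the driver does, a chain never exceeds MAX_PIPES. */
    static int collect_odm_opps(struct pipe_ctx *pipe, int opp_inst[MAX_PIPES])
    {
        int opp_cnt = 0;

        opp_inst[opp_cnt++] = pipe->opp_inst;
        for (struct pipe_ctx *odm = pipe->next_odm_pipe; odm; odm = odm->next_odm_pipe)
            opp_inst[opp_cnt++] = odm->opp_inst;
        return opp_cnt; /* > 1 means ODM combine must be reprogrammed */
    }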
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 0d27eec724b4..bdbb4a71651f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -271,58 +271,55 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
}
if (enable) {
- if (dc->current_state) {
+ /* 1. Check no memory request case for CAB.
+ * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message
+ */
+ if (dcn32_check_no_memory_request_for_cab(dc)) {
+ /* Enable no-memory-requests case */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
+ cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
+ cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
- /* 1. Check no memory request case for CAB.
- * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message
- */
- if (dcn32_check_no_memory_request_for_cab(dc)) {
- /* Enable no-memory-requests case */
- memset(&cmd, 0, sizeof(cmd));
- cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
- cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
- cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
- dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ return true;
+ }
- return true;
- }
+ /* 2. Check if all surfaces can fit in CAB.
+ * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
+ * and configure HUBP's to fetch from MALL
+ */
+ ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- /* 2. Check if all surfaces can fit in CAB.
- * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
- * and configure HUBP's to fetch from MALL
- */
- ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
+ /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
+ * or TMZ surface, don't try to enter MALL.
+ */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ plane = dc->current_state->stream_status[i].plane_states[j];
- /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
- * or TMZ surface, don't try to enter MALL.
- */
- for (i = 0; i < dc->current_state->stream_count; i++) {
- for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
- plane = dc->current_state->stream_status[i].plane_states[j];
-
- if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
- plane->address.tmz_surface) {
- mall_ss_unsupported = true;
- break;
- }
- }
- if (mall_ss_unsupported)
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
+ plane->address.tmz_surface) {
+ mall_ss_unsupported = true;
break;
+ }
}
- if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
- memset(&cmd, 0, sizeof(cmd));
- cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
- cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
- cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
- cmd.cab.cab_alloc_ways = (uint8_t)ways;
-
- dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ if (mall_ss_unsupported)
+ break;
+ }
+ if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
+ cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
+ cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
+ cmd.cab.cab_alloc_ways = (uint8_t)ways;
- return true;
- }
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ return true;
}
+
return false;
}
@@ -806,7 +803,7 @@ void dcn32_init_hw(struct dc *dc)
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (res_pool->dccg && res_pool->hubbub) {
+ if (res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
@@ -1237,8 +1234,25 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (otg_disabled[i])
+ if (otg_disabled[i]) {
+ int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
+ int opp_cnt = 1;
+ int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe, true);
+ int odm_slice_width = resource_get_odm_slice_dst_width(pipe, false);
+ struct pipe_ctx *odm_pipe;
+
+ for (odm_pipe = pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
+ opp_cnt++;
+ }
+ if (opp_cnt > 1)
+ pipe->stream_res.tg->funcs->set_odm_combine(
+ pipe->stream_res.tg,
+ opp_inst, opp_cnt,
+ odm_slice_width,
+ last_odm_slice_width);
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 2b3ba5971c69..e4f7078c1026 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -188,7 +188,7 @@ void dcn35_init_hw(struct dc *dc)
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (res_pool->dccg && res_pool->hubbub) {
+ if (res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
@@ -1078,6 +1078,19 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
update_state->pg_pipe_res_update[PG_OPTC][0] = false;
}
+ if (dc->caps.sequential_ono) {
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ !update_state->pg_pipe_res_update[PG_DPP][i]) {
+ for (j = i - 1; j >= 0; j--) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+
+ break;
+ }
+ }
+ }
}
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
@@ -1177,6 +1190,19 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
if (hpo_frl_stream_enc_acquired)
update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
+ if (dc->caps.sequential_ono) {
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ for (j = i - 1; j >= 0; j--) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+
+ break;
+ }
+ }
+ }
}
/**
@@ -1197,6 +1223,8 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
* ONO Region 2, DCPG 24: mpc opp optc dwb
* ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
*
+ * If sequential ONO is specified, the order is modified to descend from ONO Region 11 to ONO Region 0.
+ *
* @dc: Current DC state
* @update_state: update PG sequence states for HW block
*/
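The sequential-ONO support threaded through this file changes walk order in two places: the gate/ungate calculators above propagate the decision of the highest eligible pipe down to all lower indices, and the power-down path below runs from the highest pipe index to the lowest, gating DSC before HUBP/DPP within each pipe (power-up reverses this). The direction flip in outline, with stub gate functions standing in for the pg_cntl callbacks:

    #include <stdbool.h>

    static void gate_pipe_dsc(int i)      { (void)i; /* dsc_pg_control(...) */ }
    static void gate_pipe_hubp_dpp(int i) { (void)i; /* hubp_dpp_pg_control(...) */ }

    /* Sequential ONO hardware powers down from the highest pipe index
     * to the lowest, DSC first within each pipe; other hardware keeps
     * the original ascending walks. */
    static void power_down_pipes(int pipe_count, bool sequential_ono)
    {
        if (!sequential_ono) {
            for (int i = 0; i < pipe_count; i++)
                gate_pipe_hubp_dpp(i);
            for (int i = 0; i < pipe_count; i++)
                gate_pipe_dsc(i);
            return;
        }
        for (int i = pipe_count - 1; i >= 0; i--) {
            gate_pipe_dsc(i);
            gate_pipe_hubp_dpp(i);
        }
    }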
@@ -1216,19 +1244,35 @@ void dcn35_hw_block_power_down(struct dc *dc,
pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
- update_state->pg_pipe_res_update[PG_DPP][i]) {
- if (pg_cntl->funcs->hubp_dpp_pg_control)
- pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
+ if (!dc->caps.sequential_ono) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
+ }
}
- }
- for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
- if (update_state->pg_pipe_res_update[PG_DSC][i]) {
- if (pg_cntl->funcs->dsc_pg_control)
- pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
+ }
}
+ } else {
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
+ }
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
+ }
+ }
+ }
/* this will need all the clients to unregister OPTC interrupts; let DMUB FW handle this */
if (pg_cntl->funcs->plane_otg_pg_control)
@@ -1256,6 +1300,8 @@ void dcn35_hw_block_power_down(struct dc *dc,
* ONO Region 10, DCPG 3: dchubp3, dpp3
* ONO Region 3, DCPG 25: hpo - SKIPPED
*
+ * If sequential ONO is specified, the order is modified to ONO Region 0 -> ONO Region 11, ascending.
+ *
* @dc: Current DC state
* @update_state: update PG sequence states for HW block
*/
@@ -1274,11 +1320,13 @@ void dcn35_hw_block_power_up(struct dc *dc,
if (pg_cntl->funcs->plane_otg_pg_control)
pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
- for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
- if (update_state->pg_pipe_res_update[PG_DSC][i]) {
- if (pg_cntl->funcs->dsc_pg_control)
- pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
- }
+ if (!dc->caps.sequential_ono) {
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+ }
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
@@ -1286,6 +1334,13 @@ void dcn35_hw_block_power_up(struct dc *dc,
if (pg_cntl->funcs->hubp_dpp_pg_control)
pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
}
+
+ if (dc->caps.sequential_ono) {
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+ }
+ }
}
if (update_state->pg_res_update[PG_HPO]) {
if (pg_cntl->funcs->hpo_pg_control)
@@ -1474,3 +1529,75 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
}
}
}
+
+static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
+{
+ /* Calculate the average pixel count per TU; return true if it is under
+ * ~2.00 so the caller avoids generating empty TUs. This is only required
+ * for DPIA tunneling, as empty TUs are legal to generate for native DP
+ * links. Assume TU size 64 as there is currently no scenario where it's
+ * reprogrammed from the HW default. MTPs have no such limitation, so this
+ * does not affect MST use cases.
+ */
+ unsigned int pix_clk_mhz;
+ unsigned int symclk_mhz;
+ unsigned int avg_pix_per_tu_x1000;
+ unsigned int tu_size_bytes = 64;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
+ const struct dc *dc = pipe_ctx->stream->link->dc;
+
+ if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ // Not necessary for MST configurations
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return false;
+
+ pix_clk_mhz = timing->pix_clk_100hz / 10000;
+
+ // If this is true, we can't block due to dynamic ODM
+ if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
+ return false;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ symclk_mhz = 162;
+ break;
+ case LINK_RATE_HIGH:
+ symclk_mhz = 270;
+ break;
+ case LINK_RATE_HIGH2:
+ symclk_mhz = 540;
+ break;
+ case LINK_RATE_HIGH3:
+ symclk_mhz = 810;
+ break;
+ default:
+ // We shouldn't be tunneling any other rates; something is wrong
+ ASSERT(0);
+ return false;
+ }
+
+ avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
+ / (symclk_mhz * link_settings->lane_count);
+
+ // Add small empirically-decided margin to account for potential jitter
+ return (avg_pix_per_tu_x1000 < 2020);
+}
+
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
+ if (should_avoid_empty_tu(pipe_ctx))
+ return false;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+
+ return false;
+}
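As a sanity check of the integer math in should_avoid_empty_tu() above, here is a standalone sketch; the helper name and the example clocks are hypothetical, and the symbol clocks are the same 162/270/540/810 MHz values from the switch statement:

    #include <stdio.h>

    /* Mirrors the arithmetic above: average pixels per 64-byte TU,
     * scaled by 1000 to stay in integer math. */
    static unsigned int avg_pix_per_tu_x1000(unsigned int pix_clk_mhz,
                                             unsigned int symclk_mhz,
                                             unsigned int lane_count)
    {
        const unsigned int tu_size_bytes = 64;

        return (1000 * pix_clk_mhz * tu_size_bytes) / (symclk_mhz * lane_count);
    }

    int main(void)
    {
        /* 640x480@60 (~25 MHz) over HBR3 x4: 1000*25*64/(810*4) = 493,
         * i.e. ~0.49 pixels per TU -> under the 2020 margin, avoid empty TUs */
        printf("%u\n", avg_pix_per_tu_x1000(25, 810, 4));

        /* 1080p60 (~148 MHz) over HBR x4: 1000*148*64/(270*4) = 8770,
         * i.e. ~8.77 pixels per TU -> comfortably above the margin */
        printf("%u\n", avg_pix_per_tu_x1000(148, 270, 4));
        return 0;
    }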
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index bc05beba5f2c..e27b3609020f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -97,4 +97,6 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 30e6a6398839..428912f37129 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -161,7 +161,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 407a45a3ae2c..7a456618f313 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -57,7 +57,16 @@ static void dcn401_initialize_min_clocks(struct dc *dc)
clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
- clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
+ if (dc->debug.disable_boot_optimizations) {
+ clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
+ } else {
+ /* Even though DPG_EN = 1 for the connected display, it still requires the
+ * correct timing, so we cannot set DISPCLK to the minimum frequency or it
+ * could cause audio corruption. Read the current DISPCLK from DENTIST and
+ * request the same frequency to ensure that the timing is valid and unchanged.
+ */
+ clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
+ }
clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
clocks->fclk_p_state_change_support = true;
clocks->p_state_change_support = true;
@@ -250,7 +259,7 @@ void dcn401_init_hw(struct dc *dc)
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (res_pool->dccg && res_pool->hubbub) {
+ if (res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
@@ -304,7 +313,14 @@ void dcn401_init_hw(struct dc *dc)
* everything down.
*/
if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
- hws->funcs.init_pipes(dc, dc->current_state);
+ /* Disabling boot optimizations means powering down everything, including PHY,
+ * DIG, and OTG (i.e. the boot is not optimized because we do a full power down).
+ */
+ if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
+ dc->hwss.enable_accelerated_mode(dc, dc->current_state);
+ else
+ hws->funcs.init_pipes(dc, dc->current_state);
+
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
@@ -798,7 +814,7 @@ enum dc_status dcn401_enable_stream_timing(
unsigned int event_triggers = 0;
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = {0};
- struct pipe_ctx *opp_heads[MAX_PIPES];
+ struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
bool manual_mode;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
@@ -1092,6 +1108,8 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
int prev_odm_offset = 0;
int next_odm_width = 0;
int next_odm_offset = 0;
+ struct pipe_ctx *next_odm_pipe = NULL;
+ struct pipe_ctx *prev_odm_pipe = NULL;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
@@ -1103,6 +1121,7 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
}
}
+
/**
* DCN4 moved cursor composition after Scaler, so in HW it is in
* recout space and for HW Cursor position programming need to
@@ -1124,18 +1143,10 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
* pipe to make sure each pipe enabling cursor on its part of the
* screen.
*/
-
- if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
- x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
- pipe_ctx->stream->src.height;
- y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
- pipe_ctx->stream->src.width;
- } else {
- x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
- pipe_ctx->stream->src.width;
- y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
- pipe_ctx->stream->src.height;
- }
+ x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
+ pipe_ctx->stream->src.width;
+ y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
+ pipe_ctx->stream->src.height;
/**
* If the cursor's source viewport is clipped then we need to
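The hunk above collapses the rotation-dependent stream-to-destination translation into a single linear map, with rotation handled later on the recout-space copy. A minimal worked sketch with hypothetical numbers:

    #include <stdio.h>

    /* Same form as the translation above: scale a source-space coordinate
     * by dst/src and offset by the destination origin. */
    static int src_to_dst(int pos, int dst_origin, int dst_size, int src_size)
    {
        return dst_origin + pos * dst_size / src_size;
    }

    int main(void)
    {
        /* 3840-wide source scaled into a 1920-wide destination at x = 0:
         * a cursor at x = 1000 in stream space lands at x = 500 */
        printf("%d\n", src_to_dst(1000, 0, 1920, 3840));
        return 0;
    }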
@@ -1158,8 +1169,8 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
* next/prev_odm_offset is to account for scaled modes that have underscan
*/
if (odm_combine_on) {
- struct pipe_ctx *next_odm_pipe = pipe_ctx->next_odm_pipe;
- struct pipe_ctx *prev_odm_pipe = pipe_ctx->prev_odm_pipe;
+ next_odm_pipe = pipe_ctx->next_odm_pipe;
+ prev_odm_pipe = pipe_ctx->prev_odm_pipe;
while (next_odm_pipe != NULL) {
next_odm_width += next_odm_pipe->plane_res.scl_data.recout.width;
@@ -1198,43 +1209,7 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (pos_cpy.enable && dcn401_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
- if (param.rotation == ROTATION_ANGLE_0) {
- int recout_width =
- pipe_ctx->plane_res.scl_data.recout.width;
- int recout_x =
- pipe_ctx->plane_res.scl_data.recout.x;
-
- if (param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= recout_width + recout_x) {
- pos_cpy.x = 2 * recout_width
- - pos_cpy.x + 2 * recout_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * recout_x - pos_cpy.x;
- if (temp_x >= recout_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * recout_width - temp_x;
- }
- }
- } else {
- pos_cpy.x = recout_width - pos_cpy.x + 2 * recout_x;
- }
- }
- } else if (param.rotation == ROTATION_ANGLE_90) {
- if (!param.mirror) {
- uint32_t temp_y = pos_cpy.y;
-
- pos_cpy.y = pipe_ctx->plane_res.scl_data.recout.height - pos_cpy.x;
- pos_cpy.x = temp_y - prev_odm_width;
- } else {
- swap(pos_cpy.x, pos_cpy.y);
- }
-
- } else if (param.rotation == ROTATION_ANGLE_270) {
+ if (param.rotation == ROTATION_ANGLE_270) {
// Swap axis and mirror vertically
uint32_t temp_x = pos_cpy.x;
@@ -1283,16 +1258,6 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
pos_cpy.x = pipe_ctx->plane_res.scl_data.recout.width + next_odm_width + next_odm_offset - pos_cpy.y;
pos_cpy.y = temp_x;
}
- } else {
- if (param.mirror) {
- swap(pos_cpy.x, pos_cpy.y);
-
- pos_cpy.x = pipe_ctx->plane_res.scl_data.recout.width - pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.recout.x;
- pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.recout.y) + pipe_ctx->plane_res.scl_data.recout.height - pos_cpy.y;
- } else {
- pos_cpy.x = pipe_ctx->plane_res.scl_data.recout.width - pos_cpy.y;
- pos_cpy.y = temp_x;
- }
}
} else if (param.rotation == ROTATION_ANGLE_180) {
// Mirror horizontally and vertically
@@ -1319,21 +1284,10 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
pos_cpy.x = temp_x + recout_width;
}
}
- } else {
- pos_cpy.x = recout_width - pos_cpy.x + 2 * recout_x;
}
+
}
- /**
- * Display groups that are 1xnY, have pos_cpy.y > recout.height
- * Calculation:
- * delta_from_bottom = recout.y + recout.height - pos_cpy.y
- * pos_cpy.y_new = recout.y + delta_from_bottom
- * Simplify it as:
- * pos_cpy.y = recout.y * 2 + recout.height - pos_cpy.y
- */
- pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.recout.y) +
- pipe_ctx->plane_res.scl_data.recout.height - pos_cpy.y;
}
hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
@@ -1607,16 +1561,28 @@ static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
struct pipe_ctx *new_pipe;
struct pipe_ctx *old_opp_heads[MAX_PIPES];
struct dccg *dccg = dc->res_pool->dccg;
- struct pipe_ctx *old_otg_master =
- &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
- int old_opp_head_count = resource_get_opp_heads_for_otg_master(
- old_otg_master, &dc->current_state->res_ctx,
- old_opp_heads);
+ struct pipe_ctx *old_otg_master;
+ int old_opp_head_count = 0;
+
+ old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
+
+ if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
+ old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
+ &dc->current_state->res_ctx,
+ old_opp_heads);
+ } else {
+ // DC cannot assume that the current state and the new state
+ // share the same OTG pipe, since this does not hold when called
+ // in the context of a stream commit that has not been validated.
+ // Hence, set old_otg_master to NULL to skip the DSC configuration.
+ old_otg_master = NULL;
+ }
+
if (otg_master->stream_res.dsc)
dcn32_update_dsc_on_stream(otg_master,
otg_master->stream->timing.flags.DSC);
- if (old_otg_master->stream_res.dsc) {
+ if (old_otg_master && old_otg_master->stream_res.dsc) {
for (i = 0; i < old_opp_head_count; i++) {
old_pipe = old_opp_heads[i];
new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
@@ -1701,3 +1667,23 @@ void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
hws->funcs.edp_backlight_control(link, true);
}
+
+void dcn401_hardware_release(struct dc *dc)
+{
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
+
+ /* If p-state switching is unsupported, or is still supported
+ * by firmware, force it to be supported by DCN
+ */
+ if (dc->current_state) {
+ if ((!dc->clk_mgr->clks.p_state_change_support ||
+ dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0) &&
+ dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, true);
+
+ dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index f91159a6e6d4..bada43d4b2eb 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -75,7 +75,7 @@ void dcn401_fams2_global_control_lock(struct dc *dc,
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable);
void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params);
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings);
-
+void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index 8358ba74405f..1cf0608e1980 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -79,7 +79,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.does_plane_fit_in_mall = NULL,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
- .hardware_release = dcn30_hardware_release,
+ .hardware_release = dcn401_hardware_release,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.fams2_global_control_lock = dcn401_fams2_global_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
+ .power_down = dce110_power_down,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index c80ebb407add..27bba47186e9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,7 +44,7 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2)
+#define MAX_LINKS (MAX_PIPES * 2 + 2)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 7ab8ba5e23ed..72a8479e1f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -272,7 +272,7 @@ struct link_service {
uint16_t psr_vtotal_idle,
uint16_t psr_vtotal_su);
void (*edp_get_psr_residency)(
- const struct dc_link *link, uint32_t *residency);
+ const struct dc_link *link, uint32_t *residency, enum psr_residency_mode mode);
bool (*edp_get_replay_state)(
const struct dc_link *link, uint64_t *state);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index d100edaedbbb..eca3d7ee7e4e 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -76,7 +76,7 @@ static const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
- if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+ if (source >= DAL_IRQ_SOURCES_NUMBER)
return NULL;
return &irq_service->info[source];
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index 5302d2c9c760..116ff37126e7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -168,9 +168,9 @@ static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
- // Give retimer extra time to lock before updating DP_TRAINING_PATTERN_SET to TPS1
- if (tp_params->dp_phy_pattern == DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE)
- msleep(30);
+ // Give retimer extra time to lock before updating DP_TRAINING_PATTERN_SET to TPS1 or phy pattern
+ if (tp_params->dp_phy_pattern != DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE)
+ msleep(50);
}
static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index a9ddbe12142f..65607589495f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -1976,7 +1976,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
/* We need to enable stream encoder for TMDS first to apply 1/4 TMDS
* character clock in case that beyond 340MHz.
*/
- if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal))
link_hwss->setup_stream_encoder(pipe_ctx);
dc->hwss.enable_tmds_link_output(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 964abccebdc6..00974c50e11f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -363,10 +363,10 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
bool dp_is_lttpr_present(struct dc_link *link)
{
+ /* Some sink devices report invalid LTTPR revision, so don't validate against that cap */
return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4);
}
/* in DP compliance test, DPR-120 may have
@@ -399,7 +399,20 @@ static enum dc_link_rate get_link_rate_from_max_link_bw(
static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
{
- enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+ enum dc_link_rate lttpr_max_link_rate = LINK_RATE_UNKNOWN;
+
+ switch (link->dpcd_caps.lttpr_caps.max_link_rate) {
+ case LINK_RATE_LOW:
+ case LINK_RATE_HIGH:
+ case LINK_RATE_HIGH2:
+ case LINK_RATE_HIGH3:
+ lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+ break;
+ default:
+ // Assume all LTTPRs support up to HBR3 to improve misbehaving sink interop
+ lttpr_max_link_rate = LINK_RATE_HIGH3;
+ }
if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
lttpr_max_link_rate = LINK_RATE_UHBR20;
@@ -917,21 +930,17 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
memset(link_setting, 0, sizeof(*link_setting));
- /* if preferred is specified through AMDDP, use it, if it's enough
- * to drive the mode
- */
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
+ if (dc_is_dp_signal(stream->signal) &&
+ link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) {
+ /* if preferred is specified through AMDDP, use it if it's enough
+ * to drive the mode
+ */
*link_setting = link->preferred_link_setting;
- return true;
- }
-
- /* MST doesn't perform link training for now
- * TODO: add MST specific link training routine
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ } else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
decide_mst_link_settings(link, link_setting);
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
/* enable edp link optimization for DSC eDP case */
@@ -2111,7 +2120,8 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
if (cable_max_link_rate < max_link_cap.link_rate)
max_link_cap.link_rate = cable_max_link_rate;
- if (!link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
+ if (!link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY &&
+ link->dpcd_caps.cable_id.bits.CABLE_TYPE >= 2)
is_uhbr13_5_supported = false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 5f087e930cb6..96bf135b6f05 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -387,7 +387,6 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
union device_service_irq device_service_clear = {0};
enum dc_status result;
bool status = false;
- bool allow_active = false;
if (out_link_loss)
*out_link_loss = false;
@@ -442,6 +441,12 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
return false;
}
+ if (handle_hpd_irq_psr_sink(link))
+ /* PSR-related error was detected and handled */
+ return true;
+
+ handle_hpd_irq_replay_sink(link);
+
/* If PSR-related error handled, Main link may be off,
* so do not handle as a normal sink status change interrupt.
*/
@@ -463,8 +468,10 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
* If we got sink count changed it means
* Downstream port status changed,
* then DM should call DC to do the detection.
+ * NOTE: Do not handle link loss on eDP since it is an internal link
*/
- if (dp_parse_link_loss_status(
+ if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
+ dp_parse_link_loss_status(
link,
&hpd_irq_dpcd_data)) {
/* Connectivity log: link loss */
@@ -473,11 +480,6 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
sizeof(hpd_irq_dpcd_data),
"Status: ");
- if (link->psr_settings.psr_feature_enabled)
- edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
- else if (link->replay_settings.replay_allow_active)
- edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
-
if (defer_handling && has_left_work)
*has_left_work = true;
else
@@ -490,14 +492,6 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
- if (*out_link_loss == false) {
- if (handle_hpd_irq_psr_sink(link))
- /* PSR-related error was detected and handled */
- return true;
-
- handle_hpd_irq_replay_sink(link);
- }
-
if (link->type == dc_connection_sst_branch &&
hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
!= link->dpcd_sink_count)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index a93dd83cd8c0..5a0f574056d4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -329,8 +329,12 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti
if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
- if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
- max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
+
+ /* Note: we do not check whether
+ * max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL,
+ * since FFE_PRESET.settings.level is a 4-bit field and DP_FFE_PRESET_MAX_LEVEL
+ * equals 15, so FFE_PRESET.settings.level can never be greater than 15.
+ */
/* make sure the pre-emphasis matches the voltage swing*/
if (max_requested.PRE_EMPHASIS >
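The new comment above relies on the C rule that an assignment to an unsigned 4-bit bit-field is reduced modulo 2^4, so the field can never hold a value above 15. A standalone sketch of that behavior, with a hypothetical struct standing in for the real lane-settings type:

    #include <stdio.h>

    struct ffe_preset {
        unsigned int level : 4;  /* 4-bit field: representable values 0..15 */
    };

    int main(void)
    {
        struct ffe_preset p;

        p.level = 0x1F;                        /* stored modulo 2^4 */
        printf("%u\n", (unsigned int)p.level); /* prints 15 */
        return 0;
    }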
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 89f66d88c3b0..455b85adec28 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -248,9 +248,6 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- if (link->dc->debug.edp_oled_no_backlight_enable && link->dpcd_sink_ext_caps.bits.oled)
- return true;
-
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
&backlight_enable, 1) != DC_OK)
return false;
@@ -853,7 +850,7 @@ bool edp_setup_psr(struct dc_link *link,
}
-void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency)
+void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency, enum psr_residency_mode mode)
{
struct dc *dc = link->ctx->dc;
struct dmub_psr *psr = dc->res_pool->psr;
@@ -864,7 +861,7 @@ void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency)
// PSR residency measurements only supported on DMCUB
if (psr != NULL && link->psr_settings.psr_feature_enabled)
- psr->funcs->psr_get_residency(psr, residency, panel_inst);
+ psr->funcs->psr_get_residency(psr, residency, panel_inst, mode);
else
*residency = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index cb6d95cc36e4..8df8ac5bde5b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -51,7 +51,7 @@ bool edp_setup_psr(struct dc_link *link,
struct psr_context *psr_context);
bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link,
uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
-void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency, enum psr_residency_mode mode);
bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
bool wait, bool force_static, const unsigned int *power_opts);
bool edp_setup_replay(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
new file mode 100644
index 000000000000..505bc0517e08
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
@@ -0,0 +1,45 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN32
+###############################################################################
+MMHUBBUB_DCN32 = dcn32_mmhubbub.o
+
+AMD_DAL_MMHUBBUB_DCN32 = $(addprefix $(AMDDALPATH)/dc/mmhubbub/dcn32/,$(MMHUBBUB_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MMHUBBUB_DCN32)
+
+###############################################################################
+# DCN35
+###############################################################################
+MMHUBBUB_DCN35 = dcn35_mmhubbub.o
+
+AMD_DAL_MMHUBBUB_DCN35 = $(addprefix $(AMDDALPATH)/dc/mmhubbub/dcn35/,$(MMHUBBUB_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MMHUBBUB_DCN35)
+
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.c
index c3b089ba511a..c3b089ba511a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.h
index ef15b4f1f6b9..ef15b4f1f6b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.c
index 4317100564a4..4317100564a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.h
index 098e13e07272..098e13e07272 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.h
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
new file mode 100644
index 000000000000..7f7458c07e2a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
@@ -0,0 +1,45 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN32
+###############################################################################
+MPC_DCN32 = dcn32_mpc.o
+
+AMD_DAL_MPC_DCN32 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn32/,$(MPC_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN32)
+
+###############################################################################
+# DCN401
+###############################################################################
+MPC_DCN401 = dcn401_mpc.o
+
+AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401)
+
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index a0e9e9f0441a..a0e9e9f0441a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
index 9622518826c9..9622518826c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index 37ab5a4eefc7..37ab5a4eefc7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index af44054c2477..af44054c2477 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
diff --git a/drivers/gpu/drm/amd/display/dc/opp/Makefile b/drivers/gpu/drm/amd/display/dc/opp/Makefile
new file mode 100644
index 000000000000..fbfb3c3ad819
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/opp/Makefile
@@ -0,0 +1,35 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN35
+###############################################################################
+OPP_DCN35 = dcn35_opp.o
+
+AMD_DAL_OPP_DCN35 = $(addprefix $(AMDDALPATH)/dc/opp/dcn35/,$(OPP_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPP_DCN35)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
index 3542b51c9aac..3542b51c9aac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
index a9a413527801..a9a413527801 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index de83761edce8..6bbbf313b2bb 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -121,6 +121,17 @@ static bool optc31_enable_crtc(struct timing_generator *optc)
static bool optc31_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, 0xf,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
+ REG_UPDATE(OPTC_MEMORY_CONFIG,
+ OPTC_MEM_SEL, 0);
+
/* disable otg request until end of the first line
* in the vertical blank region
*/
diff --git a/drivers/gpu/drm/amd/display/dc/pg/Makefile b/drivers/gpu/drm/amd/display/dc/pg/Makefile
new file mode 100644
index 000000000000..ec11d3157a57
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/pg/Makefile
@@ -0,0 +1,35 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN35
+###############################################################################
+PG_DCN35 = dcn35_pg_cntl.o
+
+AMD_DAL_PG_DCN35 = $(addprefix $(AMDDALPATH)/dc/pg/dcn35/,$(PG_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_PG_DCN35)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
index 53bd0ae4bab5..af21c0a27f86 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
@@ -359,7 +359,7 @@ void pg_cntl35_mpcc_pg_control(struct pg_cntl *pg_cntl,
if (pg_cntl->ctx->dc->idle_optimizations_allowed)
return;
- if (mpcc_inst >= 0 && mpcc_inst < MAX_PIPES)
+ if (mpcc_inst < MAX_PIPES)
pg_cntl->pg_pipe_res_enable[PG_MPCC][mpcc_inst] = power_on;
}
@@ -369,7 +369,7 @@ void pg_cntl35_opp_pg_control(struct pg_cntl *pg_cntl,
if (pg_cntl->ctx->dc->idle_optimizations_allowed)
return;
- if (opp_inst >= 0 && opp_inst < MAX_PIPES)
+ if (opp_inst < MAX_PIPES)
pg_cntl->pg_pipe_res_enable[PG_OPP][opp_inst] = power_on;
}
@@ -379,7 +379,7 @@ void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl,
if (pg_cntl->ctx->dc->idle_optimizations_allowed)
return;
- if (optc_inst >= 0 && optc_inst < MAX_PIPES)
+ if (optc_inst < MAX_PIPES)
pg_cntl->pg_pipe_res_enable[PG_OPTC][optc_inst] = power_on;
}
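The three hunks above drop the lower half of each range check; assuming the instance parameters are unsigned (which is what makes `inst >= 0` dead code that compilers flag under -Wtype-limits), the upper bound is the only guard that matters before indexing. A minimal sketch of the pattern with a hypothetical table:

    #include <stdbool.h>

    #define MAX_PIPES 6

    static bool pg_enable[MAX_PIPES];

    static void pg_control(unsigned int inst, bool power_on)
    {
        /* "inst >= 0" would be a tautology for an unsigned type, so only
         * the upper bound needs checking before indexing the table. */
        if (inst < MAX_PIPES)
            pg_enable[inst] = power_on;
    }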
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.h
index 3de240884d22..3de240884d22 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index abc2405b7167..4860bb2531a1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -166,7 +166,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN316)
###############################################################################
-RESOURCE_DCN32 = dcn32_resource.o
+RESOURCE_DCN32 = dcn32_resource.o dcn32_resource_helpers.o
AMD_DAL_RESOURCE_DCN32 = $(addprefix $(AMDDALPATH)/dc/resource/dcn32/,$(RESOURCE_DCN32))
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index c78675f8be8a..e783afbbb397 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -1612,7 +1612,7 @@ unsigned int dcn20_calc_max_scaled_time(
if (time_per_byte == 0)
time_per_byte = 1;
- small_free_entry = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
+ small_free_entry = total_c_free_entry;
max_free_entry = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */
max_scaled_time = buf_lh_capability - urgent_watermark;
@@ -1666,8 +1666,6 @@ void dcn20_set_mcif_arb_params(
if (dwb_pipe >= MAX_DWB_PIPES)
return;
}
- if (dwb_pipe >= MAX_DWB_PIPES)
- return;
}
}
@@ -2630,7 +2628,7 @@ static bool dcn20_resource_construct(
ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
- if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
+ if (pool->base.pp_smu && pool->base.pp_smu->nv_funcs.set_wm_ranges)
pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 070a4efb308b..131d98025bd4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -795,11 +795,13 @@ static struct link_encoder *dcn201_link_encoder_create(
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
- struct dcn10_link_encoder *enc10 = &enc20->enc10;
+ struct dcn10_link_encoder *enc10;
if (!enc20)
return NULL;
+ enc10 = &enc20->enc10;
+
dcn201_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
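The reordering above is the usual allocate-then-check shape: no interior pointer is taken into the allocation until the NULL check has passed, which also keeps static analyzers quiet. A standalone sketch of the same shape with hypothetical types:

    #include <stdlib.h>

    struct enc10 { int id; };
    struct enc20 { struct enc10 enc10; };

    static struct enc10 *enc10_create(void)
    {
        struct enc20 *enc20 = calloc(1, sizeof(*enc20));
        struct enc10 *enc10;

        if (!enc20)                  /* check the allocation first... */
            return NULL;

        enc10 = &enc20->enc10;       /* ...then derive interior pointers */
        enc10->id = 0;
        return enc10;
    }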
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 0cb2cc56d973..5040a4c6ed18 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1419,10 +1419,7 @@ void dcn30_set_mcif_arb_params(
if (dwb_pipe >= MAX_DWB_PIPES)
return;
}
- if (dwb_pipe >= MAX_DWB_PIPES)
- return;
}
-
}
static struct dc_cap_funcs cap_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index f147c899a95e..969658313fd6 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1687,6 +1687,8 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream);
+ if (!phantom_stream)
+ return phantom_stream;
/* stream has limited viewport and small timing */
memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -2011,11 +2013,9 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt;
+ struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
- dml2_opt = kmemdup(&dc->dml2_options, sizeof(dc->dml2_options), GFP_KERNEL);
- if (!dml2_opt)
- return;
+ memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
DC_FP_START();
@@ -2030,8 +2030,6 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
-
- kfree(dml2_opt);
}
static struct resource_funcs dcn32_res_pool_funcs = {
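The same conversion lands in the dcn321 and dcn401 resource files below: instead of kmemdup()ing dml2_options, which can fail and forces an early return, the options are copied into what is presumably a preallocated scratch member of struct dc, removing the allocation failure path entirely. A sketch of that trade-off with hypothetical types and sizes:

    #include <string.h>

    struct dml2_configuration_options { char blob[256]; };

    struct dc {
        struct dml2_configuration_options dml2_options;
        /* preallocated scratch copy, so no runtime allocation is needed */
        struct dml2_configuration_options dml2_tmp;
    };

    static void update_bw_bounding_box(struct dc *dc)
    {
        struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;

        memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
        /* ... mutate dml2_opt and reinit DML2 without any kmalloc/kfree ... */
    }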
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
index d184105ce2b3..d184105ce2b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 07ca6f58447d..9a3cc0514a36 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1581,11 +1581,9 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt;
+ struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
- dml2_opt = kmemdup(&dc->dml2_options, sizeof(dc->dml2_options), GFP_KERNEL);
- if (!dml2_opt)
- return;
+ memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
DC_FP_START();
@@ -1600,8 +1598,6 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
-
- kfree(dml2_opt);
}
static struct resource_funcs dcn321_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 28c459907698..0094ef223c5d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -758,7 +758,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.symclk32_se = true,
.symclk32_le = true,
.symclk_fe = true,
- .physymclk = true,
+ .physymclk = false,
.dpiasymclk = true,
}
},
@@ -785,6 +785,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.ips2_entry_delay_us = 800,
.disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
+ .disable_timeout = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1891,6 +1892,10 @@ static bool dcn35_resource_construct(
*/
dc->caps.max_disp_clock_khz_at_vmin = 650000;
+ /* Sequential ONO support depends on the ASIC revision. */
+ if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ dc->caps.sequential_ono = true;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 203fd4aaf9f9..4c5e722baa3a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -738,7 +738,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.symclk32_se = true,
.symclk32_le = true,
.symclk_fe = true,
- .physymclk = true,
+ .physymclk = false,
.dpiasymclk = true,
}
},
@@ -765,6 +765,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.ips2_entry_delay_us = 800,
.disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
+ .notify_dpia_hr_bw = true,
};
static const struct dc_panel_config panel_config_defaults = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 49a8d7048194..74fb21b88f12 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -731,8 +731,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.force_cositing = CHROMA_COSITING_TOPLEFT + 1,
- .disable_idle_power_optimizations = true,
- .edp_oled_no_backlight_enable = true,
};
static struct dce_aux *dcn401_aux_engine_create(
@@ -1582,11 +1580,9 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct dml2_configuration_options *dml2_opt;
+ struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
- dml2_opt = kmemdup(&dc->dml2_options, sizeof(*dml2_opt), GFP_KERNEL);
- if (!dml2_opt)
- return;
+ memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
DC_FP_START();
@@ -1601,8 +1597,6 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source);
DC_FP_END();
-
- kfree(dml2_opt);
}
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index 2836f28fa3af..ac58991eebbc 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -170,19 +170,31 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i
bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count;
int h_active = spl_in->basic_out.output_size.width;
int v_active = spl_in->basic_out.output_size.height;
- int odm_slice_width = h_active / odm_slice_count;
+ int odm_slice_width;
struct spl_rect odm_rec;
- odm_rec.x = odm_slice_width * odm_slice_idx;
- odm_rec.width = is_last_odm_slice ?
- /* last slice width is the reminder of h_active */
- h_active - odm_slice_width * (odm_slice_count - 1) :
- /* odm slice width is the floor of h_active / count */
- odm_slice_width;
- odm_rec.y = 0;
- odm_rec.height = v_active;
+ if (spl_in->basic_out.odm_combine_factor > 0) {
+ odm_slice_width = h_active / odm_slice_count;
+ /*
+ * This path is deprecated; for the new interface the caller must pass in
+ * the ODM slice rect, i.e. the OPP input rect in timing active.
+ */
+ if (spl_in->basic_out.use_two_pixels_per_container && (odm_slice_width % 2))
+ odm_slice_width++;
+
+ odm_rec.x = odm_slice_width * odm_slice_idx;
+ odm_rec.width = is_last_odm_slice ?
+ /* last slice width is the reminder of h_active */
+ h_active - odm_slice_width * (odm_slice_count - 1) :
+ /* odm slice width is the floor of h_active / count */
+ odm_slice_width;
+ odm_rec.y = 0;
+ odm_rec.height = v_active;
+
+ return odm_rec;
+ }
- return odm_rec;
+ return spl_in->basic_out.odm_slice_rect;
}
static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out)
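A worked example of the (now deprecated) slice-width branch above, including the two-pixels-per-container rounding; a standalone sketch with hypothetical timings:

    #include <stdio.h>

    /* Mirrors the deprecated branch of calculate_odm_slice_in_timing_active():
     * non-last slices use floor(h_active / count), optionally rounded up to an
     * even width, and the last slice absorbs the remainder of h_active. */
    static int odm_slice_width(int h_active, int count, int idx, int two_ppc)
    {
        int w = h_active / count;

        if (two_ppc && (w % 2))
            w++;
        if (idx == count - 1)
            return h_active - w * (count - 1);
        return w;
    }

    int main(void)
    {
        /* 1366 wide, 2 slices, two pixels per container:
         * slice 0 = 684 (683 rounded up to even), slice 1 = 1366 - 684 = 682 */
        printf("%d %d\n",
               odm_slice_width(1366, 2, 0, 1),
               odm_slice_width(1366, 2, 1, 1));
        return 0;
    }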
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index a8f7fccfa16b..201201d3f55b 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -436,11 +436,13 @@ struct basic_out {
struct spl_size output_size; // Output Size
struct spl_rect dst_rect; // Destination Rect
struct spl_rect src_rect; // Source rect
- int odm_combine_factor; // ODM Combine Factor determine by get_odm_splits
+ int odm_combine_factor; // deprecated
+ struct spl_rect odm_slice_rect; // OPP input rect in timing active
enum spl_view_3d view_format; // TODO: View format Check if it is chroma subsampling
bool always_scale; // Is always scale enabled? Required for getting SCL_MODE
int max_downscale_src_width; // Required to get optimal no of taps
bool alpha_en;
+ bool use_two_pixels_per_container;
};
enum explicit_sharpness {
SHARPNESS_LOW = 0,
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index cd51c91a822b..85dcf6b4fe92 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -137,6 +137,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_HPD_IRQ,
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
DMUB_NOTIFICATION_DPIA_NOTIFICATION,
+ DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
DMUB_NOTIFICATION_MAX
};
@@ -560,6 +561,7 @@ struct dmub_notification {
* DPIA notification command.
*/
struct dmub_rb_cmd_dpia_notification dpia_notification;
+ struct dmub_rb_cmd_hpd_sense_notify_data hpd_sense_notify;
};
};
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 4db4c5ad5169..78e8698fe378 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -208,6 +208,11 @@ union abm_flags {
* @abm_new_frame: Indicates if a new frame update needed for ABM to ramp up into steady
*/
unsigned int abm_new_frame : 1;
+
+ /**
+ * @vb_scaling_enabled: Indicates whether VariBright scaling is enabled
+ */
+ unsigned int vb_scaling_enabled : 1;
} bitfields;
unsigned int u32All;
@@ -558,6 +563,7 @@ union dmub_fw_meta {
//==============================================================================
//< DMUB Trace Buffer>================================================================
//==============================================================================
+#if !defined(TENSILICA) && !defined(DMUB_TRACE_ENTRY_DEFINED)
/**
* dmub_trace_code_t - firmware trace code, 32-bits
*/
@@ -572,6 +578,7 @@ struct dmcub_trace_buf_entry {
uint32_t param0; /**< trace defined parameter 0 */
uint32_t param1; /**< trace defined parameter 1 */
};
+#endif
//==============================================================================
//< DMUB_STATUS>================================================================
@@ -1285,6 +1292,10 @@ enum dmub_out_cmd_type {
* Command type used for USB4 DPIA notification
*/
DMUB_OUT_CMD__DPIA_NOTIFICATION = 5,
+ /**
+ * Command type used for HPD redetect notification
+ */
+ DMUB_OUT_CMD__HPD_SENSE_NOTIFY = 6,
};
/* DMUB_CMD__DPIA command sub-types. */
@@ -1829,7 +1840,8 @@ struct dmub_cmd_fams2_global_config {
uint32_t lock_wait_time_us; // time to forecast acquisition of lock
uint32_t num_streams;
union dmub_fams2_global_feature_config features;
- uint8_t pad[3];
+ uint32_t recovery_timeout_us;
+ uint32_t hwfq_flip_programming_delay_us;
};
union dmub_cmd_fams2_config {
@@ -1877,7 +1889,8 @@ struct dmub_rb_cmd_idle_opt_dcn_restore {
*/
struct dmub_dcn_notify_idle_cntl_data {
uint8_t driver_idle;
- uint8_t reserved[59];
+ uint8_t skip_otg_disable;
+ uint8_t reserved[58];
};
/**
@@ -2468,6 +2481,22 @@ struct dmub_rb_cmd_query_hpd_state {
struct dmub_cmd_hpd_state_query_data data;
};
+/**
+ * struct dmub_rb_cmd_hpd_sense_notify_data - HPD sense notification data.
+ */
+struct dmub_rb_cmd_hpd_sense_notify_data {
+ uint32_t old_hpd_sense_mask; /**< Old HPD sense mask */
+ uint32_t new_hpd_sense_mask; /**< New HPD sense mask */
+};
+
+/**
+ * struct dmub_rb_cmd_hpd_sense_notify - DMUB_OUT_CMD__HPD_SENSE_NOTIFY command.
+ */
+struct dmub_rb_cmd_hpd_sense_notify {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_rb_cmd_hpd_sense_notify_data data; /**< payload */
+};
+
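The old/new mask pair lets a consumer work out exactly which HPD lines toggled without keeping its own state; a hedged sketch (this assumes one bit per HPD instance, which the patch does not spell out):

	/* Sketch: derive toggled HPD lines from a sense notification. */
	uint32_t changed    = data->old_hpd_sense_mask ^ data->new_hpd_sense_mask;
	uint32_t asserted   = changed & data->new_hpd_sense_mask; /* 0 -> 1 */
	uint32_t deasserted = changed & data->old_hpd_sense_mask; /* 1 -> 0 */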
/*
* Command IDs should be treated as stable ABI.
* Do not reuse or modify IDs.
@@ -2516,6 +2545,18 @@ enum dmub_cmd_psr_type {
DMUB_CMD__SET_PSR_POWER_OPT = 7,
};
+/**
+ * Different PSR residency modes.
+ * Different modes change the definition of PSR residency.
+ */
+enum psr_residency_mode {
+ PSR_RESIDENCY_MODE_PHY = 0,
+ PSR_RESIDENCY_MODE_ALPM,
+ PSR_RESIDENCY_MODE_ENABLEMENT_PERIOD,
+ /* Do not add below. */
+ PSR_RESIDENCY_MODE_LAST_ELEMENT,
+};
+
enum dmub_cmd_fams_type {
DMUB_CMD__FAMS_SETUP_FW_CTRL = 0,
DMUB_CMD__FAMS_DRR_UPDATE = 1,
@@ -2760,9 +2801,9 @@ struct dmub_cmd_psr_copy_settings_data {
*/
uint8_t relock_delay_frame_cnt;
/**
- * Explicit padding to 4 byte boundary.
+ * ESD recovery indicator.
*/
- uint8_t pad3;
+ uint8_t esd_recovery;
/**
* DSC Slice height.
*/
@@ -2982,6 +3023,14 @@ struct dmub_cmd_update_dirty_rect_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * 16-bit value dictated by the driver that carries the upper 16 bits of the coasting vtotal.
+ */
+ uint16_t coasting_vtotal_high;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -3337,7 +3386,25 @@ enum dmub_cmd_replay_type {
* Set adaptive sync sdp enabled
*/
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
+ /**
+ * Set Replay General command.
+ */
+ DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16,
+};
+/**
+ * Replay general command sub-types.
+ */
+enum dmub_cmd_replay_general_subtype {
+ REPLAY_GENERAL_CMD_NOT_SUPPORTED = -1,
+ /**
+ * TODO: For backward compatibility, allow new commands only.
+ * REPLAY_GENERAL_CMD_SET_TIMING_SYNC_SUPPORTED,
+ * REPLAY_GENERAL_CMD_SET_RESIDENCY_FRAMEUPDATE_TIMER,
+ * REPLAY_GENERAL_CMD_SET_PSEUDO_VTOTAL,
+ */
+ REPLAY_GENERAL_CMD_DISABLED_ADAPTIVE_SYNC_SDP,
+ REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION,
};
/**
@@ -3553,6 +3620,26 @@ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data {
uint8_t pad[2];
};
+struct dmub_cmd_replay_set_general_cmd_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * subtype: replay general command subtype
+ */
+ uint8_t subtype;
+
+ uint8_t pad[2];
+ /**
+ * config data with param1 and param2
+ */
+ uint32_t param1;
+
+ uint32_t param2;
+};
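A hedged sketch of filling the general-command payload for one of the new subtypes (field names come from the struct above; the per-subtype meaning of param1/param2 is firmware-defined and assumed here):

	/* Sketch: disable desync error detection on panel 0. */
	struct dmub_cmd_replay_set_general_cmd_data data = {0};

	data.panel_inst = 0;
	data.subtype = REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION;
	data.param1 = 1; /* assumed: 1 = disable; encoding is FW-defined */
	data.param2 = 0;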
/**
* Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
@@ -3671,6 +3758,20 @@ struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp {
};
/**
+ * Definition of a DMUB_CMD__REPLAY_SET_GENERAL_CMD command.
+ */
+struct dmub_rb_cmd_replay_set_general_cmd {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_GENERAL_CMD command.
+ */
+ struct dmub_cmd_replay_set_general_cmd_data data;
+};
+
+/**
* Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
struct dmub_cmd_replay_frameupdate_timer_data {
@@ -3729,7 +3830,10 @@ union dmub_replay_cmd_set {
* Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data.
*/
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
-
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_GENERAL_CMD command data.
+ */
+ struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data;
};
/**
@@ -3916,6 +4020,11 @@ enum dmub_cmd_abm_type {
* Set ABM Events
*/
DMUB_CMD__ABM_SET_EVENT = 9,
+
+ /**
+ * Get the current ACE curve.
+ */
+ DMUB_CMD__ABM_GET_ACE_CURVE = 10,
};
struct abm_ace_curve {
@@ -4445,6 +4554,55 @@ struct dmub_rb_cmd_abm_query_caps {
};
/**
+ * enum dmub_abm_ace_curve_type - ACE curve type.
+ */
+enum dmub_abm_ace_curve_type {
+ /**
+ * ACE curve as defined by the SW layer.
+ */
+ ABM_ACE_CURVE_TYPE__SW = 0,
+ /**
+ * ACE curve as defined by the SW to HW translation interface layer.
+ */
+ ABM_ACE_CURVE_TYPE__SW_IF = 1,
+};
+
+/**
+ * Definition of a DMUB_CMD__ABM_GET_ACE_CURVE command.
+ */
+struct dmub_rb_cmd_abm_get_ace_curve {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Address where ACE curve should be copied.
+ */
+ union dmub_addr dest;
+
+ /**
+ * Type of ACE curve being queried.
+ */
+ enum dmub_abm_ace_curve_type ace_type;
+
+ /**
+ * Indirect buffer length.
+ */
+ uint16_t bytes;
+
+ /**
+ * eDP panel instance.
+ */
+ uint8_t panel_inst;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
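Because the ACE curve comes back through an indirect buffer, the caller supplies a GPU-visible destination address and its size; a hedged sketch of building the request (buffer allocation elided; header and address field names assumed from existing DMUB conventions):

	/* Sketch: request the SW-defined ACE curve for eDP panel 0. */
	struct dmub_rb_cmd_abm_get_ace_curve cmd = {0};

	cmd.header.type = DMUB_CMD__ABM;
	cmd.header.sub_type = DMUB_CMD__ABM_GET_ACE_CURVE;
	cmd.dest.quad_part = indirect_buf_gpu_addr; /* hypothetical buffer */
	cmd.ace_type = ABM_ACE_CURVE_TYPE__SW;
	cmd.bytes = sizeof(struct abm_ace_curve);
	cmd.panel_inst = 0;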
+/**
* Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
*/
struct dmub_rb_cmd_abm_save_restore {
@@ -4477,6 +4635,7 @@ struct dmub_rb_cmd_abm_save_restore {
/**
* Data passed from driver to FW in a DMUB_CMD__ABM_SET_EVENT command.
*/
+
struct dmub_cmd_abm_set_event_data {
/**
@@ -5059,6 +5218,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_query_caps abm_query_caps;
/**
+ * Definition of a DMUB_CMD__ABM_GET_ACE_CURVE command.
+ */
+ struct dmub_rb_cmd_abm_get_ace_curve abm_get_ace_curve;
+
+ /**
* Definition of a DMUB_CMD__ABM_SET_EVENT command.
*/
struct dmub_rb_cmd_abm_set_event abm_set_event;
@@ -5170,6 +5334,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp;
/**
+ * Definition of a DMUB_CMD__REPLAY_SET_GENERAL_CMD command.
+ */
+ struct dmub_rb_cmd_replay_set_general_cmd replay_set_general_cmd;
+ /**
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
@@ -5204,6 +5372,10 @@ union dmub_rb_out_cmd {
* DPIA notification command.
*/
struct dmub_rb_cmd_dpia_notification dpia_notification;
+ /**
+ * HPD sense notification command.
+ */
+ struct dmub_rb_cmd_hpd_sense_notify hpd_sense_notify;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 7f53074f4e48..80da117356c0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -462,7 +462,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
- uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ uint32_t is_traceport_enabled, is_cw6_enabled;
if (!dmub || !diag_data)
return;
@@ -513,9 +513,6 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
diag_data->is_traceport_en = is_traceport_enabled;
- REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
-
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 74189102eaec..cce887cefc01 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -113,6 +113,12 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
}
break;
+ case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
+ notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
+ dmub_memcpy(&notify->hpd_sense_notify,
+ &cmd.hpd_sense_notify.data,
+ sizeof(cmd.hpd_sense_notify.data));
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
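On the driver side, the new type can then be dispatched like the existing notifications; a hedged sketch (the handler name and context are hypothetical):

	/* Sketch: consuming the notification in a driver callback. */
	switch (notify->type) {
	case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
		handle_hpd_sense_change(adev, /* hypothetical handler */
				notify->hpd_sense_notify.old_hpd_sense_mask,
				notify->hpd_sense_notify.new_hpd_sense_mask);
		break;
	default:
		break;
	}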
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 530379508a69..3cd52e7a9c77 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -973,6 +973,20 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
return true;
}
+void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint32_t vtotal)
+{
+ link->replay_settings.defer_update_coasting_vtotal_table[type] = vtotal;
+}
+
+void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
+ enum replay_coasting_vtotal_type type)
+{
+ link->replay_settings.coasting_vtotal_table[type] =
+ link->replay_settings.defer_update_coasting_vtotal_table[type];
+}
+
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint32_t vtotal)
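The defer/apply split lets a caller record a new coasting vtotal from a context where programming it immediately is unsafe, then commit it later; a hedged sketch of the intended call pattern (the enum value is assumed, not taken from this hunk):

	/* Sketch: stage the value now, commit it at a safe point later. */
	set_replay_defer_update_coasting_vtotal(link, PR_COASTING_TYPE_STATIC,
						new_vtotal);
	/* ... later, once the panel is safe to program again ... */
	update_replay_coasting_vtotal_from_defer(link, PR_COASTING_TYPE_STATIC);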
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index ff7e6f3cd6be..cac302e8fa10 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -57,6 +57,11 @@ void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint32_t vtotal);
+void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint32_t vtotal);
+void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
+ enum replay_coasting_vtotal_type type);
void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 6bb42d04b247..e8b6989a40f3 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index f324a8ef8032..a1baa13ab2c2 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7648,7 +7648,6 @@ static int si_dpm_late_init(void *handle)
static int si_dpm_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err;
DRM_DEBUG("\n");
@@ -7708,11 +7707,10 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s_smc.bin", chip_name);
if (err) {
- DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
- err, fw_name);
+ DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s_smc.bin\"\n",
+ err, chip_name);
amdgpu_ucode_release(&adev->pm.fw);
}
return err;
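This is the first of several conversions in this series to the printf-style amdgpu_ucode_request(); the stack-local name buffer and its snprintf() disappear at every call site. The pattern, condensed from the hunk above:

	/* before: caller formats the firmware path itself */
	char fw_name[30];
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);

	/* after: amdgpu_ucode_request() formats the path internally */
	err = amdgpu_ucode_request(adev, &adev->pm.fw,
				   "amdgpu/%s_smc.bin", chip_name);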
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
index 2cf2a7b12623..7711e892c31f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
@@ -163,8 +163,8 @@ typedef struct _ATOM_Tonga_State {
typedef struct _ATOM_Tonga_State_Array {
UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */
+ UCHAR ucNumEntries;
+ ATOM_Tonga_State entries[] __counted_by(ucNumEntries);
} ATOM_Tonga_State_Array;
typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
@@ -178,8 +178,8 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ UCHAR ucNumEntries;
+ ATOM_Tonga_MCLK_Dependency_Record entries[] __counted_by(ucNumEntries);
} ATOM_Tonga_MCLK_Dependency_Table;
typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
@@ -193,8 +193,8 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ UCHAR ucNumEntries;
+ ATOM_Tonga_SCLK_Dependency_Record entries[] __counted_by(ucNumEntries);
} ATOM_Tonga_SCLK_Dependency_Table;
typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
@@ -209,8 +209,8 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ UCHAR ucNumEntries;
+ ATOM_Polaris_SCLK_Dependency_Record entries[] __counted_by(ucNumEntries);
} ATOM_Polaris_SCLK_Dependency_Table;
typedef struct _ATOM_Tonga_PCIE_Record {
@@ -221,8 +221,8 @@ typedef struct _ATOM_Tonga_PCIE_Record {
typedef struct _ATOM_Tonga_PCIE_Table {
UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */
+ UCHAR ucNumEntries;
+ ATOM_Tonga_PCIE_Record entries[] __counted_by(ucNumEntries);
} ATOM_Tonga_PCIE_Table;
typedef struct _ATOM_Polaris10_PCIE_Record {
@@ -234,8 +234,8 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
typedef struct _ATOM_Polaris10_PCIE_Table {
UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */
+ UCHAR ucNumEntries;
+ ATOM_Polaris10_PCIE_Record entries[] __counted_by(ucNumEntries);
} ATOM_Polaris10_PCIE_Table;
@@ -251,8 +251,8 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
typedef struct _ATOM_Tonga_MM_Dependency_Table {
UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ UCHAR ucNumEntries;
+ ATOM_Tonga_MM_Dependency_Record entries[] __counted_by(ucNumEntries);
} ATOM_Tonga_MM_Dependency_Table;
typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
@@ -264,8 +264,8 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. */
+ UCHAR ucNumEntries;
+ ATOM_Tonga_Voltage_Lookup_Record entries[] __counted_by(ucNumEntries);
} ATOM_Tonga_Voltage_Lookup_Table;
typedef struct _ATOM_Tonga_Fan_Table {
@@ -367,7 +367,7 @@ typedef struct _ATOM_Tonga_VCE_State_Record {
typedef struct _ATOM_Tonga_VCE_State_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Tonga_VCE_State_Record entries[];
+ ATOM_Tonga_VCE_State_Record entries[] __counted_by(ucNumEntries);
} ATOM_Tonga_VCE_State_Table;
typedef struct _ATOM_Tonga_PowerTune_Table {
@@ -481,7 +481,7 @@ typedef struct _ATOM_Tonga_Hard_Limit_Record {
typedef struct _ATOM_Tonga_Hard_Limit_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Tonga_Hard_Limit_Record entries[];
+ ATOM_Tonga_Hard_Limit_Record entries[] __counted_by(ucNumEntries);
} ATOM_Tonga_Hard_Limit_Table;
typedef struct _ATOM_Tonga_GPIO_Table {
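All of these flexible-array conversions feed the same mechanism: __counted_by() tells CONFIG_FORTIFY_SOURCE and UBSAN bounds checking how many elements the trailing array holds. The hwmgr.h hunk below also retires several legacy one-element arrays in the same pass. A minimal usage sketch with generic names (not from this patch):

	struct fw_table {
		u8 count;
		struct fw_entry entries[] __counted_by(count);
	};

	/* count must be set before the entries are accessed */
	tbl = kzalloc(struct_size(tbl, entries, n), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
	tbl->count = n;
	tbl->entries[0] = first; /* now bounds-checked against tbl->count */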
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 69928a4a074b..9118fcddbf11 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -60,7 +60,7 @@ struct vi_dpm_level {
struct vi_dpm_table {
uint32_t count;
- struct vi_dpm_level dpm_level[];
+ struct vi_dpm_level dpm_level[] __counted_by(count);
};
#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
@@ -91,7 +91,7 @@ struct phm_set_power_state_input {
struct phm_clock_array {
uint32_t count;
- uint32_t values[];
+ uint32_t values[] __counted_by(count);
};
struct phm_clock_voltage_dependency_record {
@@ -122,8 +122,8 @@ struct phm_acpclock_voltage_dependency_record {
};
struct phm_clock_voltage_dependency_table {
- uint32_t count; /* Number of entries. */
- struct phm_clock_voltage_dependency_record entries[]; /* Dynamically allocate count entries. */
+ uint32_t count;
+ struct phm_clock_voltage_dependency_record entries[] __counted_by(count);
};
struct phm_phase_shedding_limits_record {
@@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record {
struct phm_uvd_clock_voltage_dependency_table {
uint8_t count;
- struct phm_uvd_clock_voltage_dependency_record entries[];
+ struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count);
};
struct phm_acp_clock_voltage_dependency_record {
@@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record {
struct phm_acp_clock_voltage_dependency_table {
uint32_t count;
- struct phm_acp_clock_voltage_dependency_record entries[];
+ struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count);
};
struct phm_vce_clock_voltage_dependency_record {
@@ -160,33 +160,33 @@ struct phm_vce_clock_voltage_dependency_record {
};
struct phm_phase_shedding_limits_table {
- uint32_t count;
- struct phm_phase_shedding_limits_record entries[];
+ uint32_t count;
+ struct phm_phase_shedding_limits_record entries[] __counted_by(count);
};
struct phm_vceclock_voltage_dependency_table {
- uint8_t count; /* Number of entries. */
- struct phm_vceclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */
+ uint8_t count;
+ struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count);
};
struct phm_uvdclock_voltage_dependency_table {
- uint8_t count; /* Number of entries. */
- struct phm_uvdclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */
+ uint8_t count;
+ struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count);
};
struct phm_samuclock_voltage_dependency_table {
- uint8_t count; /* Number of entries. */
- struct phm_samuclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */
+ uint8_t count;
+ struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count);
};
struct phm_acpclock_voltage_dependency_table {
- uint32_t count; /* Number of entries. */
- struct phm_acpclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */
+ uint32_t count;
+ struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count);
};
struct phm_vce_clock_voltage_dependency_table {
uint8_t count;
- struct phm_vce_clock_voltage_dependency_record entries[];
+ struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count);
};
@@ -393,7 +393,7 @@ union phm_cac_leakage_record {
struct phm_cac_leakage_table {
uint32_t count;
- union phm_cac_leakage_record entries[];
+ union phm_cac_leakage_record entries[] __counted_by(count);
};
struct phm_samu_clock_voltage_dependency_record {
@@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record {
struct phm_samu_clock_voltage_dependency_table {
uint8_t count;
- struct phm_samu_clock_voltage_dependency_record entries[];
+ struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count);
};
struct phm_cac_tdp_table {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 6f742d88867d..8e694b576d1c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -325,6 +325,18 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
return ret;
}
+static int smu_set_mall_enable(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->ppt_funcs->set_mall_enable)
+ return 0;
+
+ ret = smu->ppt_funcs->set_mall_enable(smu);
+
+ return ret;
+}
+
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -1804,6 +1816,7 @@ static int smu_hw_init(void *handle)
smu_dpm_set_jpeg_enable(smu, true);
smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
+ smu_set_mall_enable(smu);
smu_set_gfx_cgpg(smu, true);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3f94c33df7b7..a34c802f52be 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1409,6 +1409,11 @@ struct pptable_funcs {
int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
/**
+ * @set_mall_enable: Initialize MALL power gating control.
+ */
+ int (*set_mall_enable)(struct smu_context *smu);
+
+ /**
* @notify_rlc_state: Notify RLC power state to SMU.
*/
int (*notify_rlc_state)(struct smu_context *smu, bool en);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index c4dc5881d8df..e7f5ef49049f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -106,8 +106,8 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
-#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
-#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_MSG_MALLPowerController 0x38 ///< Set MALL control
+#define PPSMC_MSG_MALLPowerState 0x39 ///< Enter/Exit MALL PG
#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index dff36bd7a17c..12a7b0634ed5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -273,7 +273,9 @@
__SMU_DUMMY_MAP(GetMetricsVersion), \
__SMU_DUMMY_MAP(EnableUCLKShadow), \
__SMU_DUMMY_MAP(RmaDueToBadPageThreshold),\
- __SMU_DUMMY_MAP(SelectPstatePolicy),
+ __SMU_DUMMY_MAP(SelectPstatePolicy), \
+ __SMU_DUMMY_MAP(MALLPowerController), \
+ __SMU_DUMMY_MAP(MALLPowerState),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index cf556f1b5ed1..076620fa3ef5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1389,8 +1389,6 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
case 2:
curve_settings = &od_table->GfxclkFreq3;
break;
- default:
- break;
}
*offset += sysfs_emit_at(buf, *offset, "%d: %uMHz %umV\n",
i, curve_settings[0],
@@ -1594,8 +1592,6 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case 2:
curve_settings = &od_table->GfxclkFreq3;
break;
- default:
- break;
}
size += sysfs_emit_at(buf, size, "%d: %uMHz %umV\n",
i, curve_settings[0],
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 9d5ab2ea643a..16fcd9dcd202 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -94,7 +94,6 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
char ucode_prefix[25];
- char fw_name[SMU_FW_NAME_LEN];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -106,10 +105,7 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-
- err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 825786fc849e..2c35eb31475a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1863,7 +1863,6 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
u32 fatal_err, param;
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
fatal_err = 0;
param = SMU_RESET_MODE_1;
@@ -1876,8 +1875,8 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
} else {
/* fatal error triggered by ras, PMFW supports the flag
from 68.44.0 */
- if ((smu->smc_fw_version >= 0x00442c00) && ras &&
- atomic_read(&ras->in_recovery))
+ if ((smu->smc_fw_version >= 0x00442c00) &&
+ amdgpu_ras_get_fed_status(adev))
fatal_err = 1;
param |= (fatal_err << 16);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 3a50076e44f0..e17466cc1952 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -92,7 +92,6 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char fw_name[30];
char ucode_prefix[15];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
@@ -104,10 +103,7 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-
- err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 6c24e2306383..a887ab945dfa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2786,10 +2786,9 @@ static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu,
uint32_t *param)
{
struct amdgpu_device *adev = smu->adev;
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if ((smu->smc_fw_version >= supported_version) &&
- ras && atomic_read(&ras->in_recovery))
+ amdgpu_ras_get_fed_status(adev))
/* Set RAS fatal error reset flag */
*param = 1 << 16;
else
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 6b8decaf6427..78c3f94bb3ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -272,7 +272,6 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
uint32_t p2s_table_id = P2S_TABLE_ID_A;
int ret = 0, i, p2stable_count;
char ucode_prefix[15];
- char fw_name[30];
/* No need to load P2S tables in IOV mode */
if (amdgpu_sriov_vf(adev))
@@ -283,10 +282,7 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-
- ret = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ ret = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
if (ret)
goto out;
@@ -2578,24 +2574,14 @@ failed:
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- struct amdgpu_hive_info *hive = NULL;
- u32 hive_ras_recovery = 0;
- struct amdgpu_ras *ras;
u32 fatal_err, param;
int ret = 0;
- hive = amdgpu_get_xgmi_hive(adev);
- ras = amdgpu_ras_get_context(adev);
fatal_err = 0;
param = SMU_RESET_MODE_1;
- if (hive) {
- hive_ras_recovery = atomic_read(&hive->ras_recovery);
- amdgpu_put_xgmi_hive(hive);
- }
-
/* fatal error triggered by ras, PMFW supports the flag */
- if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+ if (amdgpu_ras_get_fed_status(adev))
fatal_err = 1;
param |= (fatal_err << 16);
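The mode-1 reset argument packs the fatal-error flag into its upper half; with the switch to amdgpu_ras_get_fed_status(), the per-hive recovery bookkeeping above is no longer needed. The resulting encoding, spelled out:

	/* Sketch: mode-1 reset argument as sent to the PMFW. */
	u32 param = SMU_RESET_MODE_1;

	if (amdgpu_ras_get_fed_status(adev)) /* RAS fatal error detected */
		param |= 1 << 16;            /* fatal_err flag, bit 16 */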
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 8cce17d1f230..5ffd7144d99e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -68,7 +68,6 @@ MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char fw_name[30];
char ucode_prefix[15];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
@@ -80,10 +79,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-
- err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index e4419e1561ef..18abfbd6d059 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -52,6 +52,19 @@
#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+/* MALLPowerController message arguments (defines for cache mode control) */
+#define SMU_MALL_PMFW_CONTROL 0
+#define SMU_MALL_DRIVER_CONTROL 1
+
+/*
+ * MALLPowerState message arguments
+ * (defines for allocating/releasing the cache when in driver mode)
+ */
+#define SMU_MALL_EXIT_PG 0
+#define SMU_MALL_ENTER_PG 1
+
+#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -66,6 +79,12 @@
FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
FEATURE_MASK(FEATURE_VPE_DPM_BIT))
+enum smu_mall_pg_config {
+ SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
+ SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1,
+ SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2,
+};
+
static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
@@ -113,6 +132,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1),
MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1),
MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1),
+ MSG_MAP(MALLPowerController, PPSMC_MSG_MALLPowerController, 1),
+ MSG_MAP(MALLPowerState, PPSMC_MSG_MALLPowerState, 1),
};
static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -1423,6 +1444,57 @@ static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_cl
return 0;
}
+static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+ SMU_MALL_PMFW_CONTROL, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n");
+ return ret;
+ }
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
+ SMU_MALL_DRIVER_CONTROL, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n");
+ return ret;
+ }
+
+ if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+ SMU_MALL_EXIT_PG, NULL);
+ if (ret) {
+ dev_err(adev->dev, "EXIT MALL PG Failure\n");
+ return ret;
+ }
+ } else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
+ SMU_MALL_ENTER_PG, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Enter MALL PG Failure\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_common_set_mall_enable(struct smu_context *smu)
+{
+ enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT;
+ int ret = 0;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config);
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1454,6 +1526,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
+ .set_mall_enable = smu_v14_0_common_set_mall_enable,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
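In the default SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON configuration, the init path above reduces to two SMC messages; a condensed sketch of that sequence (error handling elided):

	/* Sketch: take MALL out of power gating under driver control. */
	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
					SMU_MALL_DRIVER_CONTROL, NULL);
	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
					SMU_MALL_EXIT_PG, NULL);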
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 21d27e6235f3..b11f7c5bbcbe 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =